Botan  2.1.0
Crypto and TLS for C++11
locking_allocator.cpp
Go to the documentation of this file.
1 /*
2 * Mlock Allocator
3 * (C) 2012,2014,2015 Jack Lloyd
4 *
5 * Botan is released under the Simplified BSD License (see license.txt)
6 */
7 
8 #include <botan/locking_allocator.h>
9 #include <botan/internal/os_utils.h>
10 #include <botan/mem_ops.h>
11 #include <algorithm>
12 #include <cstdlib>
13 #include <string>
14 #include <botan/mutex.h>
15 
16 namespace Botan {
17 
18 namespace {
19 
20 bool ptr_in_pool(const void* pool_ptr, size_t poolsize,
21  const void* buf_ptr, size_t bufsize)
22  {
23  const uintptr_t pool = reinterpret_cast<uintptr_t>(pool_ptr);
24  const uintptr_t buf = reinterpret_cast<uintptr_t>(buf_ptr);
25 
26  if(buf < pool || buf >= pool + poolsize)
27  return false;
28 
29  BOTAN_ASSERT(buf + bufsize <= pool + poolsize,
30  "Pointer does not partially overlap pool");
31 
32  return true;
33  }
34 
/*
* Return the number of bytes that must be skipped after offset so the
* next position is a multiple of desired_alignment; zero if offset is
* already aligned.
*/
size_t padding_for_alignment(size_t offset, size_t desired_alignment)
   {
   const size_t remainder = offset % desired_alignment;
   return (remainder == 0) ? 0 : (desired_alignment - remainder);
   }
42 
43 }
44 
45 void* mlock_allocator::allocate(size_t num_elems, size_t elem_size)
46  {
47  if(!m_pool)
48  return nullptr;
49 
50  const size_t n = num_elems * elem_size;
51  const size_t alignment = 16;
52 
53  if(n / elem_size != num_elems)
54  return nullptr; // overflow!
55 
56  if(n > m_poolsize)
57  return nullptr;
58  if(n < BOTAN_MLOCK_ALLOCATOR_MIN_ALLOCATION || n > BOTAN_MLOCK_ALLOCATOR_MAX_ALLOCATION)
59  return nullptr;
60 
61  lock_guard_type<mutex_type> lock(m_mutex);
62 
63  auto best_fit = m_freelist.end();
64 
65  for(auto i = m_freelist.begin(); i != m_freelist.end(); ++i)
66  {
67  // If we have a perfect fit, use it immediately
68  if(i->second == n && (i->first % alignment) == 0)
69  {
70  const size_t offset = i->first;
71  m_freelist.erase(i);
72  clear_mem(m_pool + offset, n);
73 
74  BOTAN_ASSERT((reinterpret_cast<size_t>(m_pool) + offset) % alignment == 0,
75  "Returning correctly aligned pointer");
76 
77  return m_pool + offset;
78  }
79 
80  if((i->second >= (n + padding_for_alignment(i->first, alignment)) &&
81  ((best_fit == m_freelist.end()) || (best_fit->second > i->second))))
82  {
83  best_fit = i;
84  }
85  }
86 
87  if(best_fit != m_freelist.end())
88  {
89  const size_t offset = best_fit->first;
90 
91  const size_t alignment_padding = padding_for_alignment(offset, alignment);
92 
93  best_fit->first += n + alignment_padding;
94  best_fit->second -= n + alignment_padding;
95 
96  // Need to realign, split the block
97  if(alignment_padding)
98  {
99  /*
100  If we used the entire block except for small piece used for
101  alignment at the beginning, so just update the entry already
102  in place (as it is in the correct location), rather than
103  deleting the empty range and inserting the new one in the
104  same location.
105  */
106  if(best_fit->second == 0)
107  {
108  best_fit->first = offset;
109  best_fit->second = alignment_padding;
110  }
111  else
112  m_freelist.insert(best_fit, std::make_pair(offset, alignment_padding));
113  }
114 
115  clear_mem(m_pool + offset + alignment_padding, n);
116 
117  BOTAN_ASSERT((reinterpret_cast<size_t>(m_pool) + offset + alignment_padding) % alignment == 0,
118  "Returning correctly aligned pointer");
119 
120  return m_pool + offset + alignment_padding;
121  }
122 
123  return nullptr;
124  }
125 
/*
* Return a block previously handed out by allocate() to the pool,
* wiping it first. Returns false (without touching the freelist) if p
* does not point into the pool, in which case the caller should release
* it via the normal heap deallocation path instead.
*/
bool mlock_allocator::deallocate(void* p, size_t num_elems, size_t elem_size)
   {
   if(!m_pool)
      return false;

   size_t n = num_elems * elem_size;

   /*
   We return nullptr in allocate if there was an overflow, so we
   should never ever see an overflow in a deallocation.
   */
   BOTAN_ASSERT(n / elem_size == num_elems,
                "No overflow in deallocation");

   if(!ptr_in_pool(m_pool, m_poolsize, p, n))
      return false;

   // Zero the freed range before it goes back on the freelist
   std::memset(p, 0, n);

   lock_guard_type<mutex_type> lock(m_mutex);

   // Byte offset of the freed block within the pool
   const size_t start = static_cast<uint8_t*>(p) - m_pool;

   // Freelist holds (offset, length) pairs kept sorted by offset;
   // find the first entry at or after the freed range
   auto comp = [](std::pair<size_t, size_t> x, std::pair<size_t, size_t> y){ return x.first < y.first; };

   auto i = std::lower_bound(m_freelist.begin(), m_freelist.end(),
                             std::make_pair(start, 0), comp);

   // try to merge with later block
   if(i != m_freelist.end() && start + n == i->first)
      {
      i->first = start;
      i->second += n;
      n = 0; // n == 0 signals below that the range was already absorbed
      }

   // try to merge with previous block
   if(i != m_freelist.begin())
      {
      auto prev = std::prev(i);

      if(prev->first + prev->second == start)
         {
         if(n)
            {
            prev->second += n;
            n = 0;
            }
         else
            {
            // merge adjoining
            prev->second += i->second;
            m_freelist.erase(i);
            }
         }
      }

   if(n != 0) // no merge possible?
      m_freelist.insert(i, std::make_pair(start, n));

   return true;
   }
188 
189 mlock_allocator::mlock_allocator()
190  {
191  const size_t mem_to_lock = OS::get_memory_locking_limit();
192 
193  /*
194  TODO: split into multiple single page allocations to
195  help ASLR and guard pages to help reduce the damage of
196  a wild reads or write by the application.
197  */
198 
199  if(mem_to_lock)
200  {
201  m_pool = static_cast<uint8_t*>(OS::allocate_locked_pages(mem_to_lock));
202 
203  if(m_pool != nullptr)
204  {
205  m_poolsize = mem_to_lock;
206  m_freelist.push_back(std::make_pair(0, m_poolsize));
207  }
208  }
209  }
210 
211 mlock_allocator::~mlock_allocator()
212  {
213  if(m_pool)
214  {
215  secure_scrub_memory(m_pool, m_poolsize);
216  OS::free_locked_pages(m_pool, m_poolsize);
217  m_pool = nullptr;
218  }
219  }
220 
   {
   // Meyers singleton: a function-local static gives lazy, thread-safe
   // initialization (guaranteed since C++11).
   // NOTE(review): the signature line is missing from this listing;
   // presumably `mlock_allocator& mlock_allocator::instance()` per the
   // cross-reference index — confirm against the original source.
   static mlock_allocator mlock;
   return mlock;
   }
226 
227 }
void secure_scrub_memory(void *ptr, size_t n)
Definition: mem_ops.cpp:17
static mlock_allocator & instance()
void clear_mem(T *ptr, size_t n)
Definition: mem_ops.h:57
void * allocate_locked_pages(size_t length)
Definition: os_utils.cpp:257
bool deallocate(void *p, size_t num_elems, size_t elem_size)
void * allocate(size_t num_elems, size_t elem_size)
size_t get_memory_locking_limit()
Definition: os_utils.cpp:171
#define BOTAN_ASSERT(expr, assertion_made)
Definition: assert.h:27
Definition: alg_id.cpp:13
void free_locked_pages(void *ptr, size_t length)
Definition: os_utils.cpp:314