// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_ALLOCATORS_H
#define BITCOIN_ALLOCATORS_H

#include <string.h>
#include <string>
#include <map>
#include <assert.h>
#include <boost/thread/mutex.hpp>
#include <openssl/crypto.h> // for OPENSSL_cleanse()
#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#include <windows.h>
// This is used to attempt to keep keying material out of swap.
// Note that VirtualLock does not provide this as a guarantee on Windows,
// but, in practice, memory that has been VirtualLock'd almost never gets written to
// the pagefile except in rare circumstances where memory is extremely low.
#else
#include <sys/mman.h>
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif
/**
 * Thread-safe class to keep track of locked (i.e., non-swappable) memory pages.
 *
 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized for
 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
 * something like an interval tree would be the preferred data structure.
 */
template <class Locker> class LockedPageManagerBase
{
public:
    LockedPageManagerBase(size_t page_size):
        page_size(page_size)
    {
        // Determine bitmask for extracting page from address
        assert(!(page_size & (page_size - 1))); // size must be a power of two
        page_mask = ~(page_size - 1);
    }
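
    // Worked example (illustrative): with page_size = 4096 = 0x1000,
    // page_mask = ~(size_t)0xFFF, so an address such as 0x12345 is mapped to
    // its page base 0x12000; every address on that page yields the same base.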
    // For all pages in the affected range, increase the lock count
    void LockRange(void *p, size_t size)
    {
        boost::mutex::scoped_lock lock(mutex);
        if(!size) return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for(size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            if(it == histogram.end()) // Newly locked page
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
            }
            else // Page was already locked; increase counter
            {
                it->second += 1;
            }
        }
    }
    // For all pages in the affected range, decrease the lock count
    void UnlockRange(void *p, size_t size)
    {
        boost::mutex::scoped_lock lock(mutex);
        if(!size) return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for(size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease counter for page; when it reaches zero, the page will be unlocked
            it->second -= 1;
            if(it->second == 0) // Nothing on the page anymore that keeps it locked
            {
                // Unlock page and remove the count from the histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }
    // Get the number of locked pages, for diagnostics
    int GetLockedPageCount()
    {
        boost::mutex::scoped_lock lock(mutex);
        return histogram.size();
    }
private:
    Locker locker;
    boost::mutex mutex;
    size_t page_size, page_mask;
    // map of page base address to lock count
    typedef std::map<size_t,int> Histogram;
    Histogram histogram;
};
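
// Illustrative sketch (comments only, not compiled): how the per-page counter
// simulates stacking locks. Assume a hypothetical TestLocker policy, a
// 4096-byte page, and two 16-byte objects sharing one page at address p.
//
//   LockedPageManagerBase<TestLocker> lpm(4096);
//   lpm.LockRange(p, 16);        // page newly locked, count = 1
//   lpm.LockRange(p + 16, 16);   // same page, count = 2
//   lpm.UnlockRange(p, 16);      // count = 1, page stays resident
//   lpm.UnlockRange(p + 16, 16); // count = 0, page actually unlocked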
/** Determine the system page size in bytes */
static inline size_t GetSystemPageSize()
{
    size_t page_size;
#if defined(WIN32)
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
#elif defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
    return page_size;
}
/**
 * OS-dependent memory page locking/unlocking.
 * Defined as a policy class to make stubbing for tests possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages. addr and len must be a multiple of the system page size. */
    bool Lock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualLock(const_cast<void*>(addr), len);
#else
        return mlock(addr, len) == 0;
#endif
    }
    /** Unlock memory pages. addr and len must be a multiple of the system page size. */
    bool Unlock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualUnlock(const_cast<void*>(addr), len);
#else
        return munlock(addr, len) == 0;
#endif
    }
};
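
// Illustrative test stub (hypothetical, not part of this header): the policy
// design above lets tests swap in a Locker that records calls instead of
// touching the OS.
//
//   class TestLocker
//   {
//   public:
//       TestLocker(): lock_calls(0), unlock_calls(0) {}
//       bool Lock(const void*, size_t) { lock_calls++; return true; }
//       bool Unlock(const void*, size_t) { unlock_calls++; return true; }
//       int lock_calls, unlock_calls;
//   };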
/** Singleton class to keep track of locked (i.e., non-swappable) memory pages, for use in
 * std::allocator templates. */
class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
{
public:
    static LockedPageManager instance; // instantiated in util.cpp
private:
    LockedPageManager(): LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize()) {}
};
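
// Illustrative diagnostic use (sketch): application code can query the
// singleton, e.g. to report how many pages are currently pinned in RAM.
//
//   int nLockedPages = LockedPageManager::instance.GetLockedPageCount();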
//
// Allocator that locks its contents from being paged
// out of memory and clears its contents before deletion.
//
template<typename T>
struct secure_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    secure_allocator() throw() {}
    secure_allocator(const secure_allocator& a) throw() : base(a) {}
    template <typename U>
    secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
    ~secure_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef secure_allocator<_Other> other; };
    T* allocate(std::size_t n, const void *hint = 0)
    {
        T *p = std::allocator<T>::allocate(n, hint);
        if (p != NULL) // lock the pages backing the allocation
            LockedPageManager::instance.LockRange(p, sizeof(T) * n);
        return p;
    }
    void deallocate(T* p, std::size_t n)
    {
        if (p != NULL) {
            OPENSSL_cleanse(p, sizeof(T) * n); // wipe before freeing
            LockedPageManager::instance.UnlockRange(p, sizeof(T) * n);
        }
        std::allocator<T>::deallocate(p, n);
    }
};
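
// Example usage (illustrative): a byte vector for key material whose storage
// stays off swap while alive and is cleansed before being freed.
//
//   std::vector<unsigned char, secure_allocator<unsigned char> > vchSecret(32);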
//
// Allocator that clears its contents before deletion.
//
template<typename T>
struct zero_after_free_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    zero_after_free_allocator() throw() {}
    zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
    template <typename U>
    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
    ~zero_after_free_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef zero_after_free_allocator<_Other> other; };
    void deallocate(T* p, std::size_t n)
    {
        if (p != NULL)
            OPENSSL_cleanse(p, sizeof(T) * n); // wipe before freeing
        std::allocator<T>::deallocate(p, n);
    }
};
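
// Example usage (illustrative): a buffer that is zeroed before its memory goes
// back to the heap, without the page-locking overhead of secure_allocator.
//
//   typedef std::vector<char, zero_after_free_allocator<char> > WipedByteVector;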
// This is exactly like std::string, but with a custom allocator.
typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
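
// Illustrative usage (sketch): callers can hold a wallet passphrase in a
// SecureString so it is kept off swap and wiped when it goes out of scope.
//
//   SecureString strWalletPass;   // hypothetical variable name
//   strWalletPass.reserve(100);
//   // ... fill from user input; storage is locked while alive, cleansed on free

#endif // BITCOIN_ALLOCATORS_H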