pagelocker.h
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2013 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_PAGELOCKER_H
#define BITCOIN_SUPPORT_PAGELOCKER_H

#include "support/cleanse.h"

#include <map>

#include <boost/thread/mutex.hpp>
#include <boost/thread/once.hpp>

/**
 * Thread-safe class to keep track of locked (i.e., non-swappable) memory pages.
 *
 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized for
 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
 * something like an interval tree would be the preferred data structure.
 */
template <class Locker>
class LockedPageManagerBase
{
public:
    LockedPageManagerBase(size_t page_size) : page_size(page_size)
    {
        // Determine bitmask for extracting page from address
        assert(!(page_size & (page_size - 1))); // size must be power of two
        page_mask = ~(page_size - 1);
    }
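    // Illustrative note (assuming 4 KiB pages): page_size 0x1000 yields
    // page_mask ~0x0FFF, so (addr & page_mask) is the base address of the
    // page containing addr.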

    ~LockedPageManagerBase()
    {
        assert(this->GetLockedPageCount() == 0);
    }

    // For all pages in affected range, increase lock count
    void LockRange(void* p, size_t size)
    {
        boost::mutex::scoped_lock lock(mutex);
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
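        // Worked example (assuming 4 KiB pages): an 8-byte object at address
        // 0x1FFC gets start_page 0x1000 and end_page 0x2000 (0x1FFC + 7 = 0x2003),
        // so the loop below visits both pages that the object straddles.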
        for (size_t page = start_page; page <= end_page; page += page_size) {
            Histogram::iterator it = histogram.find(page);
            if (it == histogram.end()) // Newly locked page
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
            } else // Page was already locked; increase counter
            {
                it->second += 1;
            }
        }
    }

    // For all pages in affected range, decrease lock count
    void UnlockRange(void* p, size_t size)
    {
        boost::mutex::scoped_lock lock(mutex);
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size) {
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease counter for page; when it reaches zero, the page will be unlocked
            it->second -= 1;
            if (it->second == 0) // Nothing on the page anymore that keeps it locked
            {
                // Unlock page and remove the count from histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }

    // Get number of locked pages for diagnostics
    int GetLockedPageCount()
    {
        boost::mutex::scoped_lock lock(mutex);
        return histogram.size();
    }

private:
    Locker locker;
    boost::mutex mutex;
    size_t page_size, page_mask;
    // map of page base address to lock count
    typedef std::map<size_t, int> Histogram;
    Histogram histogram;
};
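
/*
 * Usage sketch (illustrative, not part of the original header): the Locker
 * policy only needs Lock()/Unlock() members taking a page-aligned address and
 * a length. A hypothetical stub that never touches the OS, e.g. for tests:
 *
 * @code
 * class DummyLocker
 * {
 * public:
 *     bool Lock(const void*, size_t) { return true; }
 *     bool Unlock(const void*, size_t) { return true; }
 * };
 *
 * void Example()
 * {
 *     LockedPageManagerBase<DummyLocker> manager(4096);
 *     char secret[16];
 *     manager.LockRange(secret, sizeof(secret));   // page counter becomes 1
 *     manager.LockRange(secret, sizeof(secret));   // same page(s), counter 2
 *     manager.UnlockRange(secret, sizeof(secret)); // counter 1, still locked
 *     manager.UnlockRange(secret, sizeof(secret)); // counter 0, page unlocked
 * }
 * @endcode
 */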

/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages.
     * addr and len must be a multiple of the system page size
     */
    bool Lock(const void* addr, size_t len);
    /** Unlock memory pages.
     * addr and len must be a multiple of the system page size
     */
    bool Unlock(const void* addr, size_t len);
};
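
/*
 * Sketch of a possible POSIX implementation of the policy above (illustrative
 * only; the actual definitions live in the corresponding .cpp file, and a
 * Windows build would use VirtualLock()/VirtualUnlock() instead):
 *
 * @code
 * #include <sys/mman.h>
 *
 * bool MemoryPageLocker::Lock(const void* addr, size_t len)
 * {
 *     return mlock(addr, len) == 0;
 * }
 *
 * bool MemoryPageLocker::Unlock(const void* addr, size_t len)
 * {
 *     return munlock(addr, len) == 0;
 * }
 * @endcode
 */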

/**
 * Singleton class to keep track of locked (i.e., non-swappable) memory pages, for use in
 * std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (e.g., see
 * MSVC's vector<T> implementation, where it allocates 1 byte of memory in the allocator).
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPageManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPageManager also be
 * static-initialized, it is created on demand.
 */
class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
{
public:
    static LockedPageManager& Instance()
    {
        boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
        return *LockedPageManager::_instance;
    }

private:
    LockedPageManager();

    static void CreateInstance()
    {
        // Using a local static instance guarantees that the object is initialized
        // when it's first needed and also deinitialized after all objects that use
        // it are done with it. I can think of one unlikely scenario where we may
        // have a static deinitialization order problem, but the check in
        // LockedPageManagerBase's destructor helps us detect if that ever happens.
        static LockedPageManager instance;
        LockedPageManager::_instance = &instance;
    }

    static LockedPageManager* _instance;
    static boost::once_flag init_flag;
};
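
/*
 * Usage sketch (illustrative): callers never construct LockedPageManager
 * directly; they go through the lazily created singleton, e.g.
 *
 * @code
 * LockedPageManager::Instance().LockRange(ptr, size);
 * int locked_pages = LockedPageManager::Instance().GetLockedPageCount();
 * @endcode
 */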

//
// Functions for directly locking/unlocking memory objects.
// Intended for non-dynamically allocated structures.
//
template <typename T>
void LockObject(const T& t)
{
    LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
}

template <typename T>
void UnlockObject(const T& t)
{
    memory_cleanse((void*)(&t), sizeof(T));
    LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
}
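
/*
 * Usage sketch (illustrative; the Key struct and its size are hypothetical):
 * pinning a stack-allocated secret for the lifetime of a scope.
 *
 * @code
 * struct Key { unsigned char data[32]; };
 *
 * void UseKey()
 * {
 *     Key key;
 *     LockObject(key);   // keep the page(s) holding 'key' out of swap
 *     // ... fill and use key.data ...
 *     UnlockObject(key); // wipe the bytes, then drop the page lock(s)
 * }
 * @endcode
 */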

#endif // BITCOIN_SUPPORT_PAGELOCKER_H