HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
UT_ThreadSafeCache.h
Go to the documentation of this file.
/*
 * PROPRIETARY INFORMATION.  This software is proprietary to
 * Side Effects Software Inc., and is not to be reproduced,
 * transmitted, or disclosed in any way without written permission.
 *
 * NAME:	UT_ThreadSafeCache.h ( UT Library, C++ )
 *
 * COMMENTS:	A fast, thread-safe cache to be used for high-performance
 *		applications.
 */
11 
#ifndef __UT_ThreadSafeCache__
#define __UT_ThreadSafeCache__

#include "UT_API.h"
#include "UT_Lock.h"
#include "UT_LockUtil.h"
#include "UT_StopWatch.h"

#include <SYS/SYS_AtomicInt.h>
#include <SYS/SYS_MemoryOrder.h>
#include <SYS/SYS_Types.h>

#include <map>

//#define ITEM_TIMING
25 
26 // Explicit instantiations are provided for:
27 // - UT_Lock
28 // - UT_RecursiveSpinLock
29 template <class Lock>
31 public:
32  class unsafe_traverser;
33 
34  // A cache item stub. You must implement a subclass to provide
35  // allocation and deallocation routines.
37  {
38  public:
40  : myPrev(0), myNext(0), myMemory(0)
41  , myRefCount(0), myTimeStamp(0)
42  , myAllocated(false) {}
43  virtual ~UT_CacheItem() { UT_ASSERT(!getRefCount()); }
44 
45  // Return a string to be used in diagnostic messages about
46  // different cache types.
47  virtual const char *getItemType() const = 0;
48 
49  // Retrieve the amount of memory consumed by the cache item
50  int64 getMemoryUsage() const { return myMemory; }
51 
52  protected:
53  // Allocate an item and return the total amount of memory consumed
54  // by the cache item in bytes.
55  virtual int64 allocate(void *parms) = 0;
56 
57  // Deallocate the item and free associated memory.
58  virtual void deallocate(void *user_data) = 0;
59 
60  private:
61  int getRefCount() const { return myRefCount; }
62  int incRefCount() { return myRefCount.add(1); }
63  int decRefCount() { return myRefCount.add(-1); }
64 
65  bool isInCache() const { return myNext; }
66 
67  // Unlink the item, leaving the existing next/prev unchanged (clear
68  // them with clearLinks()).
69  void unlink()
70  {
71  myPrev->myNext = myNext;
72  myNext->myPrev = myPrev;
73  }
74  // Set links to 0
75  void clearLinks()
76  {
77  myPrev = 0;
78  myNext = 0;
79  }
80  // Insert the item before the given item
81  void link(UT_CacheItem *before)
82  {
83  // NOTE: Although isInCache() is called from
84  // unlocked code, and it only checks whether
85  // myNext is non-NULL, nothing after that depends
86  // on the other values set in this function, so
87  // the other values here don't need to be set
88  // first.
89 
90  myNext = before;
91  myPrev = before->myPrev;
92 
93  before->myPrev->myNext = this;
94  before->myPrev = this;
95  }
96  // Link to ourselves - for a circular list of size 1
97  void selfLink()
98  {
99  myNext = this;
100  myPrev = this;
101  }
102 
103  private:
104  // Force the compiler to read/write myNext and myPrev when
105  // indicated, instead of saving temporary values or reordering
106  // the access. myNext is used to determine if this is cached,
107  // which could be changed by another thread, so is important
108  // to re-read each time.
109  UT_CacheItem *volatile myNext;
110  UT_CacheItem *volatile myPrev;
111 
112  // NOTE: Even myMemory and myTimeStamp must be volatile, because
113  // they get read before locking, then read again after locking,
114  // possibly having been written by another thread in between.
115  volatile int64 myMemory;
116  volatile uint64 myTimeStamp;
117 #ifdef ITEM_TIMING
118  double myAllocationTime;
119 #endif
120  SYS_AtomicInt32 myRefCount;
121 
122  // Force the compiler to read/write myAllocated when
123  // indicated, instead of saving temporary values or reordering
124  // the access. myAllocated could be changed by another thread,
125  // upon allocating or deallocating, so is important to re-read
126  // each time.
127  volatile bool myAllocated;
128 
129  friend class UT_ThreadSafeCache;
131  };
132 
133 public:
134  // The destroy flag controls whether the cache will clean up its
135  // allocated items on destruction.
136  UT_ThreadSafeCache(bool destroy = true);
138 
139  // Set cache size in megabytes
140  void setMaxMemory(int64 size_mb, bool prune = false);
141  // Set cache size in bytes
142  void setMaxMemoryBytes(int64 size_bytes, bool prune = false);
144  { return myMemoryLimit; }
145 
146  // Get total cache memory usage in bytes, including all allocated item
147  // memory.
148  int64 getMemoryUsage() const { return myMemoryUsed; }
149 
150  // Get number of cached items
151  int64 entries() const { return myEntries; }
152 
153  // Set user data that is passed to allocate()/deallocate()
154  void setUserData(void *data) { myUserData = data; }
155 
156  // Control whether we evict items when memory limit is used. Default is
157  // true.
158  void setCheckMemoryLimit(bool check)
159  { myCheckMemoryLimit = check; }
160  bool getCheckMemoryLimit() const
161  { return myCheckMemoryLimit; }
162 
163  // Access an item in the cache. If the item is not already in the
164  // cache, it will be inserted at the head. Calling this operation
165  // acquires an access lock that must be released with release(). parms
166  // will be passed directly to the allocate() operation.
167  void access(UT_CacheItem *item, void *parms = 0)
168  {
169  item->incRefCount();
170 
171  // This prevents the memory load in item->isInCache() from occurring
172  // before the store in item->incRefCount(), which is necessary for
173  // the reasons outlined in freeItem(). Basically, item could be
174  // deallocated after calling item->incRefCount(), but only if
175  // item->incRefCount() occurs after item->isInCache() becomes false.
176  // Thus, so long as item->isInCache() is read after
177  // item->incRefCount(), it won't get a stale true value, which would
178  // have falsely indicated that allocation isn't necessary.
179  SYSmemoryFence();
180 
181  // Before moving items in the cache, check
182  // whether the head has been moved a
183  // sufficient number of times to make it
184  // useful to move the item back to the head
185  // of the cache. This way, if the same set
186  // of items are always referenced we never
187  // need to modify the cache.
188  if (item->isInCache() && isNearHead(item))
189  {
190  return;
191  }
192 
193  // Now, reorder the item in the cache
194  accessLockedReorder(item, parms);
195  }
196 
197  // Release the access lock on an item. For every call to access()
198  // there must be a corresponding call to release().
199  void release(UT_CacheItem *item)
200  {
201  if (!item->decRefCount() && !item->isInCache())
202  freeItem(item);
203  }
204 
205  // Release the access lock and remove an item from the cache, if there
206  // are no more references.
207  void releaseAndDeallocate(UT_CacheItem *item)
208  {
209  if (!item->decRefCount())
210  freeItem(item);
211  }
212 
213  // Deallocates and removes an item from the cache. This is not
214  // thread-safe while anything has a reference to the item.
215  void freeItem(UT_CacheItem *item);
216 
217  // Clear all entries from the cache.
218  void freeAllItems();
219 
220  // Prune entries from the cache until we hit the given memory usage
221  void pruneItems(int64 memory_limit);
222 
223  // Prints fault statistics categorized by item type
224  void dumpStats(const char *name) const;
225 
227  {
228  public:
230  : myCurr(NULL)
231  , myHead(NULL)
232  {}
234  : myCurr(src.myCurr)
235  , myHead(src.myHead)
236  {}
238 
239  const UT_CacheItem *get() const { return myCurr; }
240  int operator==(const unsafe_traverser &cmp) const
241  { return myCurr == cmp.myCurr; }
242  int operator!=(const unsafe_traverser &cmp) const
243  { return myCurr != cmp.myCurr; }
244  bool atEnd() const
245  { return !myCurr; }
246  void rewind()
247  { myCurr = myHead; }
249  {
250  if (myCurr && myCurr->myNext != myHead)
251  myCurr = myCurr->myNext;
252  else
253  myCurr = NULL;
254  return *this;
255  }
257  {
258  myCurr = src.myCurr;
259  myHead = src.myHead;
260  return *this;
261  }
262  private:
264  : myCurr(head)
265  , myHead(head)
266  {}
267  const UT_CacheItem *myCurr, *myHead;
268  friend class UT_ThreadSafeCache;
269  };
270 
271  unsafe_traverser unsafe_begin() const
272  { return unsafe_traverser(myHead); }
273  unsafe_traverser unsafe_end() const
274  { return unsafe_traverser(); }
275 
276 private:
277  /// Reorder the list due to access - requires locking
278  void accessLockedReorder(UT_CacheItem *item, void *parms);
279 
280  void pruneItemsNoLock(int64 memory_limit);
281  void addToCache(UT_CacheItem *item);
282  void removeFromCache(UT_CacheItem *item);
283  void allocateItem(UT_CacheItem *item, void *parms)
284  {
285  if (!item->myAllocated)
286  {
288  lock(myObjLock, item);
289  if (!item->myAllocated)
290  {
291 #ifdef ITEM_TIMING
292  UT_StopWatch timer;
293  timer.start();
294 #endif
295  item->myMemory = item->allocate(
296  parms ? parms : myUserData);
297 
298  // Ensure that the data allocated are written out
299  // to main memory before setting myAllocated to true,
300  // else something could read myAllocated, see that it's
301  // true, read an out-of-date data pointer, probably still
302  // NULL, and crash dereferencing it.
303  SYSstoreFence();
304 
305  item->myAllocated = true;
306 
307  // NOTE: The automatic unlock will do a second SYSstoreFence,
308  // ensuring that myAllocated will be true before the
309  // lock is released.
310 
311 #ifdef ITEM_TIMING
312  item->myAllocationTime = timer.lap();
313 #endif
314  }
315  }
316 
317  // Ensure that code in the calling function doesn't
318  // speculatively read data from item before the read of
319  // item->myAllocated above. Without it, what could happen is:
320  // 0) item->myAllocated starts false, and some subclass member,
321  // p, starts NULL.
322  // 1) This thread (in the calling function after this call)
323  // speculatively reads p, getting NULL.
324  // 2) Another thread sets p to a valid address, and does a store
325  // fence, to ensure it's written out.
326  // 3) The other thread sets myAllocated to true.
327  // 4) The other thread unlocks, doing a store fence just before
328  // unlocking, to ensure myAllocated is written out.
329  // 5) This thread (at the top of this function) reads myAllocated,
330  // getting true.
331  // 6) The calling function already has a value for p, and myAllocated
332  // is true, so it dereferences p, and crashes.
333  SYSloadFence();
334  }
335 
336  // Heuristic to decide when an item is close to the head. Items can
337  // only be moved when this method returns false.
338  bool isNearHead(const UT_CacheItem *item) const
339  { return myHeadTime - item->myTimeStamp <=
340  myQuarterEntries; }
341 
342 private:
343 
344  typedef typename Lock::Scope LockScope;
345 
346  // This class keeps track of fault statistics for a given type of item
347  // in the cache.
348  class FaultStats {
349  public:
350  FaultStats()
351  : myFaults(0)
352  , myTotalMemory(0)
353  , myTotalTime(0) {}
354 
355  public:
356  int64 myFaults;
357  int64 myTotalMemory;
358  double myTotalTime;
359  };
360 
361  typedef std::map<const char *, FaultStats> FaultMap;
362 
363  UT_ObjLockType<Lock> myObjLock;
364  Lock myThreadLock;
365  UT_CacheItem *myHead;
366  int64 myMemoryLimit;
367  int64 myMemoryUsed;
368  int64 myEntries;
369  int64 myQuarterEntries;
370  FaultMap myFaultMap;
371  uint64 myHeadTime;
372  void *myUserData;
373  bool myDestroy;
374  bool myCheckMemoryLimit;
375 };
376 
377 /// This can be used as a unified cache for a process. The cache will not
378 /// destroy any items left in the cache when the process exits.
380 {
381 public:
382  static UT_ThreadSafeCache<UT_Lock> &get() { return theCache; }
383 
384  /// Set the amount of memory to be used by the cache, in MB
385  static void setMaxMemory(int size_mb)
386  {
387  theCache.setMaxMemory(size_mb);
388  theMemorySet = true;
389  }
390 
391  /// Check whether the memory limit has been set
392  static bool isMaxMemorySet()
393  { return theMemorySet; }
394 
396  { return theCache.getMemoryUsage(); }
397 
398 private:
399  static UT_ThreadSafeCache<UT_Lock> theCache;
400  static bool theMemorySet;
401 };
402 
403 /// Scoped accessor object for accessing unified cache items.
404 template<typename T>
406 {
407 public:
408  /// Default initializer. Can be used to initialize the accessor to a cache
409  /// item right away. Otherwise call @c reset to set the item to access.
410  UT_UnifiedCacheAccessor(T *item = 0, void *parm = 0)
411  {
412  access(item, parm);
413  }
414 
415  /// Release the currently accessed cache item, if any.
417  {
418  release();
419  }
420 
421  /// Returns @c true if no item is being accessed.
422  bool empty() const { return myItem == 0; }
423 
424  /// Returns the current item being accessed
425  T *item() const { return myItem; }
426 
427  /// Indirection operator for the current item being accessed. It is the
428  /// caller's responsibility to ensure that the item is valid, e.g. by
429  /// calling @c empty on it first.
430  T *operator->() const { return myItem; }
431 
432  /// Reset the currently accessed cached item to a new item. If the item
433  /// given is the same as the one currently being accessed, this call is a
434  /// no-op, even if a new @c param value is given. Reset the item to @c
435  /// nullptr and back to the wanted item if a change in @c parm value is
436  /// required.
437  void reset(T *item, void *parm = 0)
438  {
439  if (myItem != item)
440  {
441  release();
442  access(item, parm);
443  }
444  }
445 
446 private:
447  void access(T *item, void *parm)
448  {
449  myItem = item;
450  if (myItem)
451  UT_UnifiedCache::get().access(item, parm);
452  }
453  void release()
454  {
455  if (myItem)
456  {
457  UT_UnifiedCache::get().release(myItem);
458  myItem = 0;
459  }
460  }
461 
462  T *myItem;
463 };
464 
465 #endif
void reset(T *item, void *parm=0)
T * item() const
Returns the current item being accessed.
static void setMaxMemory(int size_mb)
Set the amount of memory to be used by the cache, in MB.
fpreal64 lap() const
int64 getMemoryUsage() const
void setCheckMemoryLimit(bool check)
void access(UT_CacheItem *item, void *parms=0)
#define SYSloadFence()
#define UT_API
Definition: UT_API.h:12
void setMaxMemoryBytes(int64 size_bytes, bool prune=false)
unsafe_traverser unsafe_begin() const
~UT_UnifiedCacheAccessor()
Release the currently accessed cache item, if any.
long long int64
Definition: SYS_Types.h:100
unsigned long long uint64
Definition: SYS_Types.h:101
void release(UT_CacheItem *item)
#define UT_ASSERT(ZZ)
Definition: UT_Assert.h:102
void releaseAndDeallocate(UT_CacheItem *item)
GLuint GLint GLboolean GLint GLenum access
Definition: glcorearb.h:2221
void setUserData(void *data)
bool getCheckMemoryLimit() const
#define SYSstoreFence()
void pruneItems(int64 memory_limit)
static bool isMaxMemorySet()
Check whether the memory limit has been set.
int operator!=(const unsafe_traverser &cmp) const
GLboolean * data
Definition: glcorearb.h:130
GLuint const GLchar * name
Definition: glcorearb.h:785
int64 entries() const
int cmp(T a, T b)
Definition: ImathFun.h:119
unsafe_traverser unsafe_end() const
int operator==(const unsafe_traverser &cmp) const
const unsafe_traverser & operator=(const unsafe_traverser &src)
unsafe_traverser(const unsafe_traverser &src)
#define SYSmemoryFence()
Scoped accessor object for accessing unified cache items.
void prune(TreeT &tree, typename TreeT::ValueType tolerance=zeroVal< typename TreeT::ValueType >(), bool threaded=true, size_t grainSize=1)
Reduce the memory footprint of a tree by replacing with tiles any nodes whose values are all the same...
Definition: Prune.h:374
static UT_ThreadSafeCache< UT_Lock > & get()
static int64 getMemoryUsage()
void dumpStats(const char *name) const
int64 getMaxMemoryBytes() const
void setMaxMemory(int64 size_mb, bool prune=false)
UT_UnifiedCacheAccessor(T *item=0, void *parm=0)
GLenum src
Definition: glcorearb.h:1792
bool empty() const
Returns true if no item is being accessed.
void freeItem(UT_CacheItem *item)