UT_TaskLock.h

/*
 * PROPRIETARY INFORMATION. This software is proprietary to
 * Side Effects Software Inc., and is not to be reproduced,
 * transmitted, or disclosed in any way without written permission.
 *
 * NAME:	UT_TaskLock.h (UT Library, C++)
 *
 * COMMENTS:
 *
 */

#ifndef __UT_TASKLOCK_H_INCLUDED__
#define __UT_TASKLOCK_H_INCLUDED__

#include "UT_API.h"
#include "UT_Array.h"
#include "UT_Assert.h"
#include "UT_Debug.h"
#include "UT_LockUtil.h"
#include "UT_NonCopyable.h"
#include "UT_TaskArena.h"
#include "UT_TaskScope.h"
#include <SYS/SYS_BoostThread.h>

#include <stddef.h>

// Set the following line to #if 1 to enable debug message output
#ifndef UT_TASKLOCK_DBG
    #if 0
        #define UT_TASKLOCK_DBG(ZZ) UT_DBGOUT(ZZ)
    #else
        #define UT_TASKLOCK_DBG(ZZ)
    #endif
#endif

/// A recursive mutex class for synchronizing amongst tasks defined by
/// UT_TaskScope.
///
/// The assumptions that UT_TaskScope uses for tasks are:
/// - Tasks are hierarchical in a forest of trees
/// - When child tasks are running, the parent task is not
/// - Child tasks always finish before their parent task
///
/// Requirements:
/// - You must unlock before your own task ends
/// - You must unlock your lock before waiting for a child task to end
///
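/// A minimal usage sketch (illustrative only; theLock and doWork() are
/// hypothetical names, not part of the HDK):
/// @code
/// static UT_TaskLockT<true> theLock;
///
/// void doWork()
/// {
///     theLock.lock();
///     // ... touch state shared between tasks; the same task scope may
///     //     call lock() again recursively ...
///     theLock.unlock();   // must unlock before this task ends
/// }
/// @endcode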
template <bool NESTED>
class UT_TaskLockT : UT_NonCopyable
{
private:
    friend class Scope;

    typedef hboost::mutex               ut_Mutex;
    typedef ut_Mutex::scoped_lock       ut_MutexLock;
    typedef hboost::condition_variable  ut_Condition;

    struct ut_Data
    {
        const UT_TaskScope *myOwner;
        int                 myLockCount;

        ut_Data()
            : myOwner(NULL)
            , myLockCount(0)
        {
        }
    };

    ut_Mutex            myMutex;
    ut_Condition        myCondition;
    int                 myNumWaitingThreads;
    ut_Data             myData;
    UT_Array<ut_Data>   myDataStack;

    bool unsafeTryLock(const UT_TaskScope &task, bool &was_first)
    {
        UT_ASSERT(myData.myLockCount >= 0);
        if (myData.myLockCount == 0 || myData.myOwner == &task)
        {
            UT_ASSERT(myData.myLockCount > 0 || myData.myOwner == NULL);
            myDataStack.append(myData);
            myData.myOwner = &task;
            ++myData.myLockCount;
            was_first = (myData.myLockCount == 1);
            UT_TASKLOCK_DBG(("Acquired first lock %p for owner %p (count %d)",
                             this, myData.myOwner, myData.myLockCount));
            return true;
        }
        UT_TASKLOCK_DBG(("Failed fast lock %p for task %p, owner %p "
                         "(count %d), waiting threads %d",
                         this, &task, myData.myOwner, myData.myLockCount,
                         myNumWaitingThreads));
        return false;
    }

    // Returns true if locked within timeout.
    bool privateLock(hboost::system_time const &wait_until, bool &was_first)
    {
        ut_MutexLock lock_scope(myMutex);
        const UT_TaskScope &task = getTaskScope();

        // If nobody holds the lock, then acquire it right away
        if (unsafeTryLock(task, was_first))
            return true;
        // If we have a positive lock count, then we should already have an
        // owner
        UT_ASSERT(myData.myOwner != NULL);

        // Perform lock, blocking if needed
        bool ok = true;
        while (ok)
        {
            // Test if we can acquire the lock
            if (myData.myOwner == NULL
                || task.isAncestor(*myData.myOwner))
            {
                UT_ASSERT(myData.myOwner != NULL || myData.myLockCount == 0);
                myDataStack.append(myData);
                myData.myOwner = &task;
                myData.myLockCount = 1;
                was_first = true;
                UT_TASKLOCK_DBG(("Took lock %p for owner %p from"
                                 " %p (count %d)",
                                 this, myData.myOwner,
                                 myDataStack.last().myOwner,
                                 myDataStack.last().myLockCount));
                return true;
            }

            // Unlock myMutex and wait until it can be acquired again
            UT_TASKLOCK_DBG(("Waiting on lock %p with owner %p (count %d) "
                             "prev waiting threads %d",
                             this, myData.myOwner, myData.myLockCount,
                             myNumWaitingThreads));
            ++myNumWaitingThreads;
            if (wait_until.is_pos_infinity())
            {
                myCondition.wait(lock_scope);
            }
            else
            {
                ok = myCondition.timed_wait(lock_scope, wait_until);
            }
            --myNumWaitingThreads;
            UT_ASSERT(myNumWaitingThreads >= 0);
        }

        return false;
    }

    bool privateTryLock()
    {
        ut_MutexLock lock_scope(myMutex);
        const UT_TaskScope &task = getTaskScope();
        bool was_first = false;

        return unsafeTryLock(task, was_first);
    }

    void privateUnlock()
    {
        ut_MutexLock lock_scope(myMutex);
        bool notify;

        UT_TASKLOCK_DBG(("Release lock %p for owner %p, "
                         "new owner %p (count %d), waiting threads %d",
                         this, myData.myOwner,
                         myDataStack.last().myOwner,
                         myDataStack.last().myLockCount,
                         myNumWaitingThreads));

        UT_ASSERT(myData.myLockCount >= 1);
        notify = (myData.myLockCount == 1);

        myData = myDataStack.last();
        myDataStack.removeLast();

        // Release the lock if the count goes down to zero.
        UT_ASSERT(myData.myLockCount >= 0);
        if (notify)
        {
            UT_ASSERT(myData.myLockCount > 0 || myData.myOwner == NULL);

            // Signal all the threads that are waiting. We don't want to
            // signal just one, since the next one wouldn't get signalled
            // until unlock() is called again, and we want to let in all
            // descendant tasks.
            if (myNumWaitingThreads > 0)
            {
                myCondition.notify_all();
            }
        }
    }

    bool privateHasLock()
    {
        ut_MutexLock lock_scope(myMutex);
        const UT_TaskScope &task = getTaskScope();
        return (myData.myLockCount > 0 && myData.myOwner == &task);
    }

    static const UT_TaskScope &getTaskScope()
    {
        if (NESTED)
            return UT_TaskScope::getOrCreateCurrent();
        else
            return UT_TaskScope::getOrCreateRoot();
    }

public:

    UT_TaskLockT()
        : myNumWaitingThreads(0)
    {
    }
    ~UT_TaskLockT()
    {
    }

    void lock()
    {
        bool was_first = false;
        (void) privateLock(hboost::system_time(hboost::posix_time::pos_infin),
                           was_first);
    }

    /// Same as lock() except it also returns whether this was the first time
    /// this task scope obtained the lock (i.e. non-recursively).
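    /// A brief sketch of the intended pattern (illustrative only; myLock is
    /// a hypothetical UT_TaskLock instance):
    /// @code
    /// bool was_first;
    /// myLock.lock(was_first);   // outermost acquisition: was_first == true
    /// myLock.lock(was_first);   // recursive acquisition: was_first == false
    /// myLock.unlock();
    /// myLock.unlock();
    /// @endcode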
    void lock(bool &was_first)
    {
        (void) privateLock(hboost::system_time(hboost::posix_time::pos_infin),
                           was_first);
    }

    bool timedLock(int timeout)
    {
        bool was_first = false;
        return privateLock(hboost::get_system_time()
                               + hboost::posix_time::milliseconds(timeout),
                           was_first);
    }

    bool tryLock()
    {
        return privateTryLock();
    }

    bool safeLock()
    {
        lock();
        return true;
    }

    void unlock()
    {
        privateUnlock();
    }

    bool hasLock()
    {
        return privateHasLock();
    }

    /// Class for auto-unlocking
    typedef UT_LockScopeType<UT_TaskLockT<NESTED> > Scope;
};

/// A recursive mutex class for synchronizing amongst tasks defined by
/// UT_TaskScope.
///
/// The assumptions that UT_TaskScope uses for tasks are:
/// - Tasks are hierarchical in a forest of trees
/// - When child tasks are running, the parent task is not
/// - Child tasks always finish before their parent task
///
/// Requirements:
/// - You must unlock before your own task ends
/// - You must unlock your lock before waiting for a child task to end
///
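/// A minimal sketch using the auto-unlocking Scope guard (illustrative only;
/// theLock and updateSharedState() are hypothetical names, and the guard is
/// assumed to lock in its constructor and unlock in its destructor):
/// @code
/// static UT_TaskLock theLock;
///
/// void updateSharedState()
/// {
///     UT_TaskLock::Scope scope(theLock);  // unlocked on scope exit
///     // ... modify state shared between tasks ...
/// }
/// @endcode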
typedef UT_TaskLockT<true>  UT_TaskLock;

typedef UT_TaskLockT<false> UT_TaskRootLock;

/// UT_TaskLock that avoids deadlocks when used with TBB task scheduling.
class UT_API UT_TaskLockWithArena : private UT_TaskLock
{
public:

    /// Performs the functor F while inside this lock scope in UT_TaskArena.
    /// This method allows the optimization that if we're calling this while
    /// the lock is already held in the same task scope, then we can avoid
    /// creating an unnecessary task arena.
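    /// A brief sketch (illustrative only; the lock instance and lambda body
    /// are hypothetical):
    /// @code
    /// UT_TaskLockWithArena myArenaLock;
    /// myArenaLock.lockedExecute([&]()
    /// {
    ///     // work that may spawn TBB tasks; it runs inside a UT_TaskArena
    ///     // unless this task scope already held the lock
    /// });
    /// @endcode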
    template <typename F>
    void
    lockedExecute(const F &functor)
    {
        bool was_first = false;
        (void) UT_TaskLock::lock(was_first);
        if (!was_first)
        {
            functor();
        }
        else
        {
            UT_TaskArena arena;
            arena.execute(functor);
        }
        UT_TaskLock::unlock();
    }

    /// Performs the functor F while inside this lock scope OUTSIDE an arena.
    /// @note Only do this if you know functor will never spawn tasks!
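    /// For example (illustrative only; myArenaLock and myCounter are
    /// hypothetical), a plain counter update that cannot spawn tasks:
    /// @code
    /// myArenaLock.lockedExecuteWithoutArena([&]() { ++myCounter; });
    /// @endcode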
    template <typename F>
    void
    lockedExecuteWithoutArena(const F &functor)
    {
        UT_TaskLock::lock();
        functor();
        UT_TaskLock::unlock();
    }

    using UT_TaskLock::hasLock;
};

#endif // __UT_TASKLOCK_H_INCLUDED__