lock.h
/***************************************************************************************************
 * Copyright 2020 NVIDIA Corporation. All rights reserved.
 **************************************************************************************************/
/// \file mi/base/lock.h
/// \brief Multithreading locks.

#ifndef MI_BASE_LOCK_H
#define MI_BASE_LOCK_H

#include <cstdlib>

#include <mi/base/assert.h>
#include <mi/base/config.h>

#ifndef MI_PLATFORM_WINDOWS
#include <cerrno>
#include <pthread.h>
#else
#include <mi/base/miwindows.h>
#endif

namespace mi {

namespace base {

/// Non-recursive lock class.
///
/// Only one thread can hold the lock at a time. The lock is non-recursive: a thread that already
/// holds the lock must not attempt to acquire it again (doing so aborts the process, see lock()
/// below).
class Lock
{
public:
    /// Constructor.
    Lock();

    /// Destructor.
    ~Lock();

    /// Utility class to acquire a lock that is released by the destructor (scoped locking).
    class Block
    {
    public:
        /// Acquires \p lock unless it is \c NULL; a lock can also be set later via set() or
        /// try_set().
        explicit Block( Lock* lock = 0);

        /// Releases the lock (if any).
        ~Block();

        /// Releases the current lock (if any) and acquires \p lock (if not \c NULL).
        void set( Lock* lock);

        /// Like set(), but does not block if \p lock is already locked; returns \c true on
        /// success.
        bool try_set( Lock* lock);

        /// Releases the lock (if any).
        void release();

    private:
        // The lock associated with this helper class.
        Lock* m_lock;
    };

protected:
    /// Acquires the lock; aborts on recursive locking attempts.
    void lock();

    /// Tries to acquire the lock without blocking; returns \c true on success.
    bool try_lock();

    /// Releases the lock.
    void unlock();

private:
    // This class is non-copyable.
    Lock( Lock const &);

    // This class is non-assignable.
    Lock& operator=( Lock const &);

#ifndef MI_PLATFORM_WINDOWS
    // The mutex implementing the lock.
    pthread_mutex_t m_mutex;
#else
    // The critical section implementing the lock.
    CRITICAL_SECTION m_critical_section;
    // The flag used to ensure that the lock is non-recursive.
    bool m_locked;
#endif
};
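
// Usage sketch (illustrative, not part of the original header): the lock is meant to be held
// through a scoped Lock::Block, so it is released even if the critical section exits early;
// 'update_shared_state' is a hypothetical function name.
//
//     mi::base::Lock lock;
//
//     void update_shared_state()
//     {
//         mi::base::Lock::Block block( &lock);
//         // ... critical section; the lock is released when 'block' goes out of scope
//     }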

/// Recursive lock class.
///
/// In contrast to Lock, the same thread may acquire the lock multiple times; it is released again
/// when unlock() has been called as often as lock().
class Recursive_lock
{
public:
    /// Constructor.
    Recursive_lock();

    /// Destructor.
    ~Recursive_lock();

    /// Utility class to acquire a lock that is released by the destructor (scoped locking).
    class Block
    {
    public:
        /// Acquires \p lock unless it is \c NULL; a lock can also be set later via set() or
        /// try_set().
        explicit Block( Recursive_lock* lock = 0);

        /// Releases the lock (if any).
        ~Block();

        /// Releases the current lock (if any) and acquires \p lock (if not \c NULL).
        void set( Recursive_lock* lock);

        /// Like set(), but does not block if \p lock is held by another thread; returns \c true
        /// on success.
        bool try_set( Recursive_lock* lock);

        /// Releases the lock (if any).
        void release();

    private:
        // The lock associated with this helper class.
        Recursive_lock* m_lock;
    };

protected:
    /// Acquires the lock.
    void lock();

    /// Tries to acquire the lock without blocking; returns \c true on success.
    bool try_lock();

    /// Releases the lock.
    void unlock();

private:
    // This class is non-copyable.
    Recursive_lock( Recursive_lock const &);

    // This class is non-assignable.
    Recursive_lock& operator=( Recursive_lock const &);

#ifndef MI_PLATFORM_WINDOWS
    // The mutex implementing the lock.
    pthread_mutex_t m_mutex;
#else
    // The critical section implementing the lock.
    CRITICAL_SECTION m_critical_section;
#endif
};

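// Usage sketch (illustrative, not part of the original header): unlike Lock, a Recursive_lock
// may be re-acquired by the thread that already holds it, so scoped Blocks can nest, e.g. in a
// recursive function ('g_lock' and 'visit' are hypothetical names).
//
//     mi::base::Recursive_lock g_lock;
//
//     void visit( int depth)
//     {
//         mi::base::Recursive_lock::Block block( &g_lock);   // nested acquisition is fine
//         if( depth > 0)
//             visit( depth - 1);
//     }
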
#ifndef MI_FOR_DOXYGEN_ONLY

inline Lock::Lock()
{
#ifndef MI_PLATFORM_WINDOWS
    pthread_mutexattr_t mutex_attributes;
    pthread_mutexattr_init( &mutex_attributes);
    pthread_mutexattr_settype( &mutex_attributes, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init( &m_mutex, &mutex_attributes);
#else
    InitializeCriticalSection( &m_critical_section);
    m_locked = false;
#endif
}

inline Lock::~Lock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_destroy( &m_mutex);
    // Avoid assertion here because it might be mapped to an exception.
    // mi_base_assert( result == 0);
    (void) result;
#else
    // Avoid assertion here because it might be mapped to an exception.
    // mi_base_assert( !m_locked);
    DeleteCriticalSection( &m_critical_section);
#endif
}

inline void Lock::lock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_lock( &m_mutex);
    if( result == EDEADLK) {
        mi_base_assert( !"Dead lock");
        abort();
    }
#else
    EnterCriticalSection( &m_critical_section);
    if( m_locked) {
        mi_base_assert( !"Dead lock");
        abort();
    }
    m_locked = true;
#endif
}

inline bool Lock::try_lock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_trylock( &m_mutex);
    // Old glibc versions incorrectly return EDEADLK instead of EBUSY
    // (https://sourceware.org/bugzilla/show_bug.cgi?id=4392).
    mi_base_assert( result == 0 || result == EBUSY || result == EDEADLK);
    return result == 0;
#else
    BOOL result = TryEnterCriticalSection( &m_critical_section);
    if( result == 0)
        return false;
    if( m_locked) {
        LeaveCriticalSection( &m_critical_section);
        return false;
    }
    m_locked = true;
    return true;
#endif
}

inline void Lock::unlock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_unlock( &m_mutex);
    mi_base_assert( result == 0);
    (void) result;
#else
    mi_base_assert( m_locked);
    m_locked = false;
    LeaveCriticalSection( &m_critical_section);
#endif
}

inline Lock::Block::Block( Lock* lock)
{
    m_lock = lock;
    if( m_lock)
        m_lock->lock();
}

inline Lock::Block::~Block()
{
    release();
}

inline void Lock::Block::set( Lock* lock)
{
    if( m_lock == lock)
        return;
    if( m_lock)
        m_lock->unlock();
    m_lock = lock;
    if( m_lock)
        m_lock->lock();
}

inline bool Lock::Block::try_set( Lock* lock)
{
    if( m_lock == lock)
        return true;
    if( m_lock)
        m_lock->unlock();
    m_lock = lock;
    if( m_lock && m_lock->try_lock())
        return true;
    m_lock = 0;
    return false;
}

inline void Lock::Block::release()
{
    if( m_lock)
        m_lock->unlock();
    m_lock = 0;
}
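
// Usage sketch (illustrative, not part of the original header): Block::try_set() above gives a
// non-blocking variant of the scoped pattern; 'lock' and 'do_work' are hypothetical names.
//
//     mi::base::Lock lock;
//
//     void do_work()
//     {
//         mi::base::Lock::Block block;         // no lock acquired yet
//         if( block.try_set( &lock)) {
//             // ... critical section; released by ~Block() or an explicit block.release()
//         } else {
//             // lock is busy; do other work instead of blocking
//         }
//     }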

inline Recursive_lock::Recursive_lock()
{
#ifndef MI_PLATFORM_WINDOWS
    pthread_mutexattr_t mutex_attributes;
    pthread_mutexattr_init( &mutex_attributes);
    pthread_mutexattr_settype( &mutex_attributes, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init( &m_mutex, &mutex_attributes);
#else
    InitializeCriticalSection( &m_critical_section);
#endif
}

inline Recursive_lock::~Recursive_lock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_destroy( &m_mutex);
    // Avoid assertion here because it might be mapped to an exception.
    // mi_base_assert( result == 0);
    (void) result;
#else
    DeleteCriticalSection( &m_critical_section);
#endif
}

inline void Recursive_lock::lock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_lock( &m_mutex);
    // Avoid assertion here because it might be mapped to an exception.
    // mi_base_assert( result == 0);
    (void) result;
#else
    EnterCriticalSection( &m_critical_section);
#endif
}

inline bool Recursive_lock::try_lock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_trylock( &m_mutex);
    mi_base_assert( result == 0 || result == EBUSY);
    return result == 0;
#else
    BOOL result = TryEnterCriticalSection( &m_critical_section);
    return result != 0;
#endif
}

inline void Recursive_lock::unlock()
{
#ifndef MI_PLATFORM_WINDOWS
    int result = pthread_mutex_unlock( &m_mutex);
    mi_base_assert( result == 0);
    (void) result;
#else
    LeaveCriticalSection( &m_critical_section);
#endif
}

inline Recursive_lock::Block::Block( Recursive_lock* lock)
{
    m_lock = lock;
    if( m_lock)
        m_lock->lock();
}

inline Recursive_lock::Block::~Block()
{
    release();
}

inline void Recursive_lock::Block::set( Recursive_lock* lock)
{
    if( m_lock == lock)
        return;
    if( m_lock)
        m_lock->unlock();
    m_lock = lock;
    if( m_lock)
        m_lock->lock();
}

inline bool Recursive_lock::Block::try_set( Recursive_lock* lock)
{
    if( m_lock == lock)
        return true;
    if( m_lock)
        m_lock->unlock();
    m_lock = lock;
    if( m_lock && m_lock->try_lock())
        return true;
    m_lock = 0;
    return false;
}

inline void Recursive_lock::Block::release()
{
    if( m_lock)
        m_lock->unlock();
    m_lock = 0;
}

#endif // MI_FOR_DOXYGEN_ONLY

/*@}*/ // end group mi_base_threads

} // namespace base

} // namespace mi

#endif // MI_BASE_LOCK_H