MutexLock.h

//-*-c++-*-
#ifndef __MUTEX_LOCK_ET__
#define __MUTEX_LOCK_ET__

#include "Shared/Resource.h"
#include "ProcessID.h"
#if !defined(PLATFORM_APERIOS)
#  include "IPC/RCRegion.h"
#endif
#include <iostream>
#include <exception>
#include <typeinfo>

// If you want to use the same software-only lock on both
// PLATFORM_LOCAL and Aperios, then uncomment this next line:
//#define MUTEX_LOCK_ET_USE_SOFTWARE_ONLY

// However, that's probably only of use if you want to debug a problem with the lock itself

class SemaphoreManager;

//! The main purpose of this base class is to allow setting usleep_granularity across all locks
/*! It would be nice if we just put functions in here so we could
 *  reference locks without regard to the number of doors, but
 *  then all processes which use the lock would have to have been
 *  created via fork to handle virtual calls properly, and I don't
 *  want to put that overhead on the otherwise lightweight SoundPlay
 *  process under Aperios. */
class MutexLockBase : public Resource {
public:
  virtual ~MutexLockBase() {} //!< basic destructor
  
  static const unsigned int NO_OWNER=-1U; //!< marks as unlocked
  static unsigned int usleep_granularity; //!< the estimated cost in microseconds of a usleep call itself -- the value passed to usleep will be 10 times this (only used by the software lock implementation on non-Aperios)

  // This section is only needed for the non-software-only locks, which use the SemaphoreManager
#if !defined(PLATFORM_APERIOS) && !defined(MUTEX_LOCK_ET_USE_SOFTWARE_ONLY)
#  if !defined(TEKKOTSU_SHM_STYLE) || TEKKOTSU_SHM_STYLE==NO_SHM
  static void aboutToFork() {}
#  else
  //! exception thrown if a lock is created but there aren't any more semaphores available
  class no_more_semaphores : public std::exception {
  public:
    //! constructor
    no_more_semaphores() throw() : std::exception() {}
    //! returns a constant message string
    virtual const char* what() const throw() { return "SemaphoreManager::getSemaphore() returned invalid()"; }
  };
  
  //! sets the SemaphoreManager which will hand out semaphores for any and all locks
  /*! see #preallocated for an explanation of why this function does what it does */
  static void setSemaphoreManager(SemaphoreManager* mgr);
  
  //! returns #semgr
  static SemaphoreManager* getSemaphoreManager() {
    return semgr;
  }
  //! this should be called if a fork() is about to occur (the notification needs to be passed on to #preallocated)
  static void aboutToFork();
  
protected:
  //! the global semaphore manager object for all locks; may point to #preallocated during process initialization or destruction
  static SemaphoreManager* semgr;
  
  //! if a semaphore needs to be reserved and #semgr is NULL, use #preallocated's current value and increment it
  /*! Here's the conundrum: each shared region needs a lock, each
   *  lock needs an ID from the semaphore manager, and the semaphore
   *  manager needs to be in a shared region to coordinate handing out
   *  IDs.  This is resolved by having the locks check #semgr to see
   *  if it is initialized yet, and use #preallocated if it is not.
   *  Then, when the SemaphoreManager is assigned, the preallocated
   *  IDs are copied over from here.
   *
   *  For reference, only MutexLock needs to worry about this, because
   *  it's the only thing that will need an ID before the
   *  manager is created. */
  static SemaphoreManager preallocated;
#  endif
#endif
};
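
// Example (editor's sketch, not part of the original header): a startup hook
// that tunes the software lock's sleep granularity.  Only usleep_granularity
// itself comes from this file; the function name and the value 50 are
// hypothetical, standing in for a platform-measured cost of a usleep() call.
// Each spin of the software lock sleeps usleep_granularity*10 microseconds,
// so a larger estimate wastes less CPU but raises worst-case lock latency.
inline void tuneLockSpinGranularityExample() {
  MutexLockBase::usleep_granularity = 50; // e.g. usleep overhead measured at ~50us
}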



#if !defined(PLATFORM_APERIOS) && !defined(MUTEX_LOCK_ET_USE_SOFTWARE_ONLY)
#  if !defined(TEKKOTSU_SHM_STYLE) || TEKKOTSU_SHM_STYLE==NO_SHM
#    include "Thread.h"

//! Implements a mutual exclusion lock using a pthread mutex
/*! Use this to prevent more than one thread from accessing a data structure
 *  at the same time (which often leads to unpredictable and unexpected results)
 *
 *  The template parameter is not used (it is only needed if compiling with IPC enabled)
 *
 *  Locks in this class can be recursive or non-recursive, depending on
 *  whether you call releaseAll() or unlock().  If you lock 5 times, then
 *  you need to call unlock() 5 times as well before it will be
 *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
 *  will undo all 5 levels of locking.
 *
 *  Just remember: unlock() releases one level, but releaseAll() completely unlocks.
 *
 *  Note that there is no check that the thread doing the unlocking is the one
 *  that actually has the lock.  Be careful about this.
 */
template<unsigned int num_doors>
class MutexLock : public MutexLockBase {
public:
  //! constructor
  MutexLock() : owner_index(NO_OWNER), thdLock() {}
  
  //! destructor, releases any lock levels still held
  ~MutexLock() {
    if(owner_index!=NO_OWNER) {
      owner_index=NO_OWNER;
      while(thdLock.getLockLevel()>0)
        thdLock.unlock();
    }
  }
  
  //! blocks until the lock is achieved, via the underlying Thread::Lock
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value. */
  void lock(int id) {
    thdLock.lock();
    if(owner_index!=static_cast<unsigned>(id))
      owner_index=id;
  }
  
  //! attempts to get a lock, returns true if it succeeds
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value. */
  bool try_lock(int id) {
    if(!thdLock.trylock())
      return false;
    owner_index=id;
    return true;
  }
  
  //! releases one recursive lock-level from whoever has the current lock
  inline void unlock() {
    if(thdLock.getLockLevel()<=0)
      std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
    if(thdLock.getLockLevel()<=1)
      owner_index=NO_OWNER;
    thdLock.unlock();
  }
  
  //! completely unlocks, regardless of how many times a recursive lock has been obtained
  void releaseAll() {
    owner_index=NO_OWNER;
    while(thdLock.getLockLevel()>0)
      thdLock.unlock();
  }
  
  //! returns the lock count
  unsigned int get_lock_level() const { return thdLock.getLockLevel(); }
  
  //! returns the current owner's id
  inline int owner() const { return owner_index; }
  
protected:
  friend class MarkScope;
  virtual void useResource(Resource::Data&) { lock(ProcessID::getID()); }
  virtual void releaseResource(Resource::Data&) { unlock(); }
  
  unsigned int owner_index; //!< holds the Tekkotsu process id of the current lock owner
  Thread::Lock thdLock; //!< all the actual implementation is handed off to the thread lock
};
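
// Example (editor's sketch, not part of the original header): the recursive
// locking semantics described above.  Each lock() must be matched by an
// unlock(), while a single releaseAll() drops every level at once.  Only
// MutexLock and ProcessID::getID() come from this file's includes; the
// function name and the <1> door count are illustrative.
inline void mutexLockRecursionExample() {
  static MutexLock<1> lk;       // the template parameter is unused in this build
  lk.lock(ProcessID::getID());  // lock level 1
  lk.lock(ProcessID::getID());  // lock level 2 (recursive, same owner)
  lk.unlock();                  // back to level 1, still locked
  lk.releaseAll();              // fully unlocked with one call
}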

#  else /* IPC lock using semaphores */
#    include "SemaphoreManager.h"
#    include <unistd.h>
#    include <pthread.h>

//! Implements a mutual exclusion lock using semaphores (SysV style, through SemaphoreManager)
/*! Use this to prevent more than one process from accessing a data structure
 *  at the same time (which often leads to unpredictable and unexpected results)
 *
 *  The template parameter specifies the maximum number of different processes
 *  which need to be protected.  This needs to be allocated ahead of time, as
 *  there doesn't seem to be a way to dynamically scale as needed without
 *  risking possible errors if two processes are both trying to set up at the
 *  same time.  Also, by using a template parameter, all data structures are
 *  contained within the class's memory allocation, so no pointers are involved.
 *
 *  Locks in this class can be recursive or non-recursive, depending on
 *  whether you call releaseAll() or unlock().  If you lock 5 times, then
 *  you need to call unlock() 5 times as well before it will be
 *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
 *  will undo all 5 levels of locking.
 *
 *  Just remember: unlock() releases one level, but releaseAll() completely unlocks.
 *
 *  Note that there is no check that the process doing the unlocking is the one
 *  that actually has the lock.  Be careful about this.
 *
 *  @warning Doing mutual exclusion in software is tricky business, be careful about any
 *  modifications you make!
 */
template<unsigned int num_doors>
class MutexLock : public MutexLockBase {
public:
  //! constructor, gets a new semaphore from the semaphore manager
  MutexLock() : MutexLockBase(), 
    sem(semgr->getSemaphore()), owner_index(NO_OWNER), owner_thread()
  {
    if(sem==semgr->invalid())
      throw no_more_semaphores();
    semgr->setValue(sem,0);
  }
  
  //! constructor, use this if you already have a semaphore id you want to use from the semaphore manager
  MutexLock(SemaphoreManager::semid_t semid) : MutexLockBase(), 
    sem(semid), owner_index(NO_OWNER), owner_thread()
  {
    if(sem==semgr->invalid())
      throw no_more_semaphores();
    semgr->setValue(sem,0);
  }
  
  //! destructor, releases the semaphore back to the semaphore manager
  ~MutexLock() {
    if(owner_index!=NO_OWNER) {
      owner_index=NO_OWNER;
      owner_thread=pthread_self();
      if(semgr!=NULL && !semgr->hadFault()) {
        unsigned int depth=semgr->getValue(sem);
        semgr->setValue(sem,0);
        while(depth-->0)
          Thread::popNoCancel(); 
      }
    }
    if(semgr!=NULL && !semgr->hadFault())
      semgr->releaseSemaphore(sem);
    else
      std::cerr << "Warning: MutexLock leaked semaphore " << sem << " because SemaphoreManager is NULL" << std::endl;
  }
  
  //! blocks until the lock is achieved.  This is done efficiently using a SysV style semaphore
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value. */
  void lock(int id) {
    Thread::pushNoCancel();
    doLock(id);
  }
  
  //! attempts to get a lock, returns true if it succeeds
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value. */
  bool try_lock(int id) {
    Thread::pushNoCancel();
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming try_lock success of " << sem << " because SemaphoreManager is NULL" << std::endl;
      owner_index=id;
      return true;
    }
    // blind grab
    semgr->raise(sem,1);
    // see if we have it
    if(owner()==id && isOwnerThread()) {
      // we already had the lock, the blind grab added one to its lock level, good to go
      return true;
    } else {
      if(semgr->getValue(sem)==1) {
        // got it with the blind grab, set our info
        owner_index=id;
        owner_thread=pthread_self();
        return true;
      } else {
        // someone else owns it, relinquish the blind grab
        if(!semgr->lower(sem,1,false))
          std::cerr << "Warning: MutexLock::try_lock failure caused strange underflow" << std::endl;
        Thread::popNoCancel();
        return false;
      }
    }
  }
  
  //! releases one recursive lock-level from whoever has the current lock
  inline void unlock() {
    releaseResource(emptyData);
    Thread::popNoCancel(); 
  }
  
  //! Completely unlocks, regardless of how many times a recursive lock has been obtained.
  /*! Use with extreme caution. */
  void releaseAll() {
    owner_index=NO_OWNER;
    owner_thread=pthread_self();
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming releaseAll of " << sem << " because SemaphoreManager is NULL" << std::endl;
      return;
    }
    unsigned int depth=semgr->getValue(sem);
    semgr->setValue(sem,0);
    while(depth-->0)
      Thread::popNoCancel(); 
  }
  
  //! returns the lock count
  unsigned int get_lock_level() const {
    if(semgr==NULL || semgr->hadFault())
      return (owner_index==NO_OWNER) ? 0 : 1;
    else
      return semgr->getValue(sem);
  }
  
  //! returns the current owner's id
  inline int owner() const { return owner_index; }
  
protected:
  //! returns true if the current thread is the one which owns the lock
  bool isOwnerThread() {
    pthread_t cur=pthread_self();
    return pthread_equal(cur,owner_thread);
  }
  friend class MarkScope;
  //! does the actual lock acquisition
  void doLock(int id) {
    if(owner_index!=static_cast<unsigned>(id) || !isOwnerThread()) {
      // have to wait and then claim the lock
      if(semgr!=NULL && !semgr->hadFault()) {
        semgr->testZero_add(sem,1);
#ifdef DEBUG
        if(owner_index!=NO_OWNER || !pthread_equal(pthread_t(),owner_thread))
          std::cerr << "Owner is not clear: " << owner_index << ' ' << owner_thread << std::endl;
#endif
      } else
        std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
      owner_index=id;
      owner_thread=pthread_self();
    } else {
      // we already have the lock, add one to its lock level
      if(semgr!=NULL && !semgr->hadFault())
        semgr->raise(sem,1);
      else
        std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
    }
  }
  virtual void useResource(Resource::Data&) {
    doLock(ProcessID::getID());
  }
  virtual void releaseResource(Resource::Data&) {
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming unlock of " << sem << " from " << owner_index << " because SemaphoreManager is NULL" << std::endl;
      owner_index=NO_OWNER;
      owner_thread=pthread_t();
      return;
    }
    if(owner_index==NO_OWNER || !isOwnerThread()) {
      std::cerr << "Warning: MutexLock::unlock called by thread that didn't own the lock " << owner_index << ' ' << owner_thread << ' ' << pthread_self() << ' ' << semgr->getValue(sem) << std::endl;
      return;
    }
    int depth = semgr->getValue(sem);
    if(depth==1) {
      owner_index=NO_OWNER;
      owner_thread=pthread_t();
    } else if(depth<=0) {
      std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
      owner_index=NO_OWNER;
      owner_thread=pthread_t();
      return;
    }
    if(!semgr->lower(sem,1,false))
      std::cerr << "Warning: MutexLock::unlock caused strange underflow" << std::endl;
  }
  
  SemaphoreManager::semid_t sem; //!< the SysV semaphore number
  unsigned int owner_index; //!< holds the Tekkotsu process id of the current lock owner
  pthread_t owner_thread; //!< holds the thread id of the owner thread
  
private:
  MutexLock(const MutexLock& ml); //!< copy constructor, do not call
  MutexLock& operator=(const MutexLock& ml); //!< assignment, do not call
};
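
// Example (editor's sketch, not part of the original header): exception-safe
// locking via MarkScope, which is declared a friend in the class above so it
// can call useResource()/releaseResource().  This assumes MarkScope is
// constructible from a Resource& and available via Shared/MarkScope.h, and
// that ProcessID::NumProcesses bounds the process count; the counter and
// function below are hypothetical.
//
//   #include "Shared/MarkScope.h"
//   MutexLock<ProcessID::NumProcesses> counterLock;
//   int sharedCounter=0;
//   void bumpCounter() {
//     MarkScope autolock(counterLock); // locks using ProcessID::getID()
//     ++sharedCounter;
//   } // unlocks on scope exit, even if an exception is thrown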



#  endif /* uni-process or (potentially) multi-process lock? */
#else // software-only mutual exclusion, used on Aperios, or if MUTEX_LOCK_ET_USE_SOFTWARE_ONLY is defined


//#define MUTEX_LOCK_ET_USE_SPINCOUNT

// if DEBUG_MUTEX_LOCK is defined, we'll display information about each lock
// access while the left front paw button is pressed
//#define DEBUG_MUTEX_LOCK

#ifdef DEBUG_MUTEX_LOCK
#  include "Shared/WorldState.h"
#endif
#ifndef PLATFORM_APERIOS
#  include "Thread.h"
#  include <unistd.h>
#endif

//! A software-only mutual exclusion lock (does not depend on processor or OS support)
/*! Use this to prevent more than one process from accessing a data structure
 *  at the same time (which often leads to unpredictable and unexpected results)
 *
 *  The template parameter specifies the maximum number of different processes
 *  which need to be protected.  This needs to be allocated ahead of time, as
 *  there doesn't seem to be a way to dynamically scale as needed without
 *  risking possible errors if two processes are both trying to set up at the
 *  same time.  Also, by using a template parameter, all data structures are
 *  contained within the class's memory allocation, so no pointers are involved.
 *
 *  Locks in this class can be recursive or non-recursive, depending on
 *  whether you call releaseAll() or unlock().  If you lock 5 times, then
 *  you need to call unlock() 5 times as well before it will be
 *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
 *  will undo all 5 levels of locking.
 *
 *  Just remember: unlock() releases one level, but releaseAll() completely unlocks.
 *
 *  Note that there is no check that the process doing the unlocking is the one
 *  that actually has the lock.  Be careful about this.
 *
 *  @warning Doing mutual exclusion in software is tricky business, be careful about any
 *  modifications you make!
 *
 * Implements a first-come-first-served mutex as laid out on page 11 of: \n
 * "A First Come First Served Mutual Exclusion Algorithm with Small Communication Variables" \n
 * Edward A. Lycklama, Vassos Hadzilacos - Aug. 1991
 */
template<unsigned int num_doors>
class MutexLock : public MutexLockBase {
 public:
  //! constructor, just calls the init() function
  MutexLock() : doors_used(0), owner_index(NO_OWNER), lockcount(0) { init(); }

#ifndef PLATFORM_APERIOS
  //! destructor, re-enables thread cancelability if a lock was still held (non-Aperios only)
  ~MutexLock() {
    if(owner_index!=NO_OWNER) {
      owner_index=NO_OWNER;
      thdLock.unlock();
    }
  }
#endif

  //! blocks (by busy looping on do_try_lock()) until a lock is achieved
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value.
   *  @todo - I'd like to not use a loop here */
  void lock(int id);

  //! attempts to get a lock, returns true if it succeeds
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value. */
  bool try_lock(int id);

  //! releases one recursive lock-level from whoever has the current lock
  inline void unlock();

  //! completely unlocks, regardless of how many times a recursive lock has been obtained
  void releaseAll() { lockcount=1; unlock(); }
  
  //! returns the lock count
  unsigned int get_lock_level() const { return lockcount; }

  //! returns the current owner's id
  inline int owner() const { return owner_index==NO_OWNER ? NO_OWNER : doors[owner_index].id; }

  //! allows you to reset one of the possible owners, so another process can take its place; this is not well tested
  void forget(int id);

#ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
  inline unsigned int getSpincount() { return spincount; } //!< returns the number of times the spin() function has been called
  inline void resetSpincount() { spincount=0; } //!< resets the counter of the number of times the spin() function has been called
#endif
  
 protected:
  friend class MarkScope;
  virtual void useResource(Resource::Data&) {
    lock(ProcessID::getID());
  }
  virtual void releaseResource(Resource::Data&) {
    unlock();
  }
  
  //! Does the work of trying to get a lock
  /*! Pass @c true for @a block if you want it to use FCFS blocking
   *  instead of just returning right away if another process has the lock */
  bool do_try_lock(unsigned int index, bool block);

  //! returns the internal index mapping to the id number supplied by the process; may create a new entry
  unsigned int lookup(int id);

#ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
  volatile unsigned int spincount; //!< handy to track how much time we're wasting
  void init() { spincount=0; } //!< just resets spincount
  inline void spin() {
    spincount++;
#ifndef PLATFORM_APERIOS
    usleep(usleep_granularity*10); // this is a carefully chosen value intended to solve all the world's problems (not)
#endif
  } //!< if you find a way to sleep for a few microseconds instead of busy waiting, put it here
#else
  void init() {} //!< doesn't do anything unless MUTEX_LOCK_ET_USE_SPINCOUNT is defined (used to do a memset, but that was causing problems)
  inline void spin() {
#ifndef PLATFORM_APERIOS
    usleep(usleep_granularity*10); // this is a carefully chosen value intended to solve all the world's problems (not)
#endif
  } //!< if you find a way to sleep for a few microseconds instead of busy waiting, put it here
#endif
    
  //! Holds per-process shared info, one of these per process
  struct door_t {
    door_t() : id(NO_OWNER), FCFS_in_use(false), BL_ready(false), BL_in_use(false), turn('\0'), next_turn_bit('\0') {} //!< constructor
    int id; //!< process ID this doorway is assigned to
    volatile bool FCFS_in_use; //!< in the FCFS doorway, corresponds to 'c_i'
    volatile bool BL_ready; //!< signals past the FCFS doorway, ready for the BL doorway, corresponds to 'v_i'
    volatile bool BL_in_use; //!< in the Burns-Lamport doorway, corresponds to 'x_i'
    volatile unsigned char turn; //!< clock pulse, initial value doesn't matter
    unsigned char next_turn_bit; //!< selects which bit of turn will be flipped next
  };

  door_t doors[num_doors]; //!< holds all the doors
  unsigned int doors_used; //!< counts the number of doors used
  unsigned int owner_index; //!< holds the door index of the current lock owner
  unsigned int lockcount; //!< the depth of the lock, 0 when unlocked
#ifndef PLATFORM_APERIOS
  Thread::Lock thdLock; //!< guards against concurrent threads within this process (non-Aperios builds only)
#endif
};
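
// Example (editor's sketch, not part of the original header): a counter
// shared by two processes, each calling in with its own unique ID.  The
// num_doors argument of 2 bounds how many distinct IDs may ever use this
// lock; placing the struct in a shared memory region is assumed to be
// handled elsewhere, and SharedCounterExample/myID are hypothetical names.
struct SharedCounterExample {
  SharedCounterExample() : lock(), count(0) {} //!< constructor
  MutexLock<2> lock; //!< one door per participating process
  int count;         //!< data guarded by #lock
};
//! increments SharedCounterExample::count under its lock; @a myID must be unique per process
inline void sharedCounterIncrement(SharedCounterExample& s, int myID) {
  s.lock.lock(myID); // blocks (first-come-first-served) until this process holds the lock
  ++s.count;
  s.lock.unlock();   // only one level was taken, so this fully releases
}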


template<unsigned int num_doors>
void
MutexLock<num_doors>::lock(int id) {
#ifndef PLATFORM_APERIOS
  thdLock.lock();
#endif
  if(owner()!=id) {
    if(!do_try_lock(lookup(id),true)) {
      // note the block argument above -- this should never fail if blocking is actually working
      std::cout << "Warning: lock() failed to achieve lock" << std::endl;
    }
  } else {
#ifdef DEBUG_MUTEX_LOCK
    if(state==NULL || state->buttons[LFrPawOffset])
      std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
#endif
  }
  lockcount++;
}


template<unsigned int num_doors>
bool
MutexLock<num_doors>::try_lock(int id) {
#ifndef PLATFORM_APERIOS
  if(!thdLock.trylock())
    return false;
#endif
  if(owner()==id) {
#ifdef DEBUG_MUTEX_LOCK
    if(state==NULL || state->buttons[LFrPawOffset])
      std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
#endif
    lockcount++;
    return true;
  } else {
    if(do_try_lock(lookup(id),false)) {
      lockcount++;
      return true;
    } else {
#ifndef PLATFORM_APERIOS
      thdLock.unlock();
#endif
      return false;
    }
  }
}


template<unsigned int num_doors>
void
MutexLock<num_doors>::unlock() {
  if(lockcount==0) {
    std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
    return;
  }
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[owner_index].id << " unlock " << this << " level " << lockcount << std::endl;
#endif
  if(--lockcount==0) {
    if(owner_index!=NO_OWNER) {
      unsigned int tmp = owner_index;
      owner_index=NO_OWNER;
      doors[tmp].BL_in_use=false;
      doors[tmp].BL_ready=false;
      // *** Lock has been released *** //
#ifndef PLATFORM_APERIOS
      thdLock.unlock();
#endif
    }
  }
}


//! If you define this to do something more interesting, you can use it to see what's going on in the locking process
//#define mutexdebugout(i,c) { std::cout << ((char)(i==0?c:((i==1?'M':'a')+(c-'A')))) << std::flush; }

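// Overview of do_try_lock() below (an editor's informal summary of the
// Lycklama-Hadzilacos algorithm as implemented here):
//  1. FCFS doorway: take a snapshot S[] of every door's 'turn', flip one bit
//     of our own 'turn', raise BL_ready, and leave the doorway.  A door whose
//     turn still matches our snapshot entered before us and has not yet
//     released, so the next loop waits on it; later arrivals will see our
//     changed turn and wait on us instead.
//  2. Burns-Lamport doorway: among processes that cleared the FCFS doorway
//     together, lower door indices win; we back off while any lower-indexed
//     door holds BL_in_use, then wait for higher-indexed doors to leave.
//  3. Non-blocking calls (block==false) withdraw by clearing BL_ready at
//     either stage instead of spinning.
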
template<unsigned int num_doors>
bool
MutexLock<num_doors>::do_try_lock(unsigned int i, bool block) {
  if(i==NO_OWNER) {
    std::cerr << "WARNING: new process attempted to lock beyond num_doors ("<<num_doors<<")" << std::endl;
    return false;
  }
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[i].id << " attempting lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif
  unsigned char S[num_doors]; // a local snapshot of everyone's turn values
  // *** Entering FCFS doorway *** //
  doors[i].FCFS_in_use=true;
  for(unsigned int j=0; j<num_doors; j++)
    S[j]=doors[j].turn;
  doors[i].next_turn_bit=1-doors[i].next_turn_bit;
  doors[i].turn^=(1<<doors[i].next_turn_bit);
  doors[i].BL_ready=true;
  doors[i].FCFS_in_use=false;
  // *** Leaving FCFS doorway *** //
  for(unsigned int j=0; j<num_doors; j++) {
    while(doors[j].FCFS_in_use || (doors[j].BL_ready && S[j]==doors[j].turn))
      if(block)
        spin();
      else {
        doors[i].BL_ready=false;
#ifdef DEBUG_MUTEX_LOCK
        if(state==NULL || state->buttons[LFrPawOffset])
          std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif
        return false;
      }
  }
  // *** Entering Burns-Lamport *** //
  do {
    doors[i].BL_in_use=true;
    for(unsigned int t=0; t<i; t++)
      if(doors[t].BL_in_use) {
        doors[i].BL_in_use=false;
        if(!block) {
          doors[i].BL_ready=false;
#ifdef DEBUG_MUTEX_LOCK
          if(state==NULL || state->buttons[LFrPawOffset])
            std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif
          return false;
        }
        while(doors[t].BL_in_use)
          spin();
        break;
      }
  } while(!doors[i].BL_in_use);
  for(unsigned int t=i+1; t<num_doors; t++)
    while(doors[t].BL_in_use)
      spin();
  // *** Leaving Burns-Lamport *** //
  // *** Lock has been given *** //
  owner_index=i;
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[i].id << " received lock " << this << " at " << get_time() << std::endl;
#endif
  return true;
}


template<unsigned int num_doors>
unsigned int
MutexLock<num_doors>::lookup(int id) {
  // TODO - this could break if two new processes are adding themselves at the same time,
  //        or if an id is being forgotten at the same time.
  // A very small number of processes is expected to be involved, so it's probably
  // not worth the overhead of doing something fancy like a sorted array.
  unsigned int i;
  for(i=0; i<doors_used; i++)
    if(doors[i].id==id)
      return i;
  if(i==num_doors)
    return NO_OWNER;
  doors[i].id=id;
  doors_used++;
  return i;
}


template<unsigned int num_doors>
void
MutexLock<num_doors>::forget(int id) { // not tested thoroughly (or at all?)
  unsigned int i = lookup(id);
  do_try_lock(i,true);
  doors[i].id=doors[--doors_used].id;
  doors[doors_used].id=NO_OWNER;
  releaseAll();
}

#endif //MUTEX_LOCK_ET_USE_SOFTWARE_ONLY

/*! @file 
 * @brief Defines MutexLock, a mutual exclusion lock implemented via a pthread mutex, SysV semaphores, or a software-only algorithm, depending on platform and configuration
 * @author ejt (Creator), Edward A. Lycklama, Vassos Hadzilacos (authors of the paper on which the software lock is based)
 */

#endif
