Tekkotsu Homepage
Demos
Overview
Downloads
Dev. Resources
Reference
Credits

MutexLock.h

Go to the documentation of this file.
00001 //-*-c++-*-
00002 #ifndef __MUTEX_LOCK_ET__
00003 #define __MUTEX_LOCK_ET__
00004 
00005 #include "Shared/Resource.h"
00006 #include "ProcessID.h"
00007 #include <iostream>
00008 #include <exception>
00009 #include <typeinfo>
00010 
00011 #ifndef PLATFORM_APERIOS
00012 #  include <unistd.h>
00013 #  include "SemaphoreManager.h"
00014 #  include "Thread.h"
00015 #endif
00016 
00017 // If you want to use the same software-only lock on both
00018 // PLATFORM_LOCAL and Aperios, then uncomment this next line:
00019 //#define MUTEX_LOCK_ET_USE_SOFTWARE_ONLY
00020 
00021 // However, that's probably only of use if you want to debug a problem with the lock itself
00022 
00023 
00024 
00025 //! The main purpose of this base class is actually to allow setting of usleep_granularity across all locks
00026 /*! It would be nice if we just put functions in here so we could
00027  *  reference locks without regard to the number of doors, but
00028  *  then all processes which use the lock would have to have been
00029  *  created via fork to handle virtual calls properly, and I don't
00030  *  want to put that overhead on the otherwise lightweight SoundPlay
00031  *  process under Aperios. */
class MutexLockBase : public Resource {
public:
  virtual ~MutexLockBase() {} //!< basic destructor

  static const unsigned int NO_OWNER=-1U; //!< marks as unlocked (all-ones sentinel value compared against owner ids)
  static unsigned int usleep_granularity; //!< the estimated cost in microseconds of usleep call itself -- value passed to usleep will be 10 times this (only used by software lock implementation on non-Aperios)

  // This section is only needed for the non-software-only locks, which coordinate through the SemaphoreManager
#ifndef PLATFORM_APERIOS
  //! exception thrown by lock constructors when SemaphoreManager::getSemaphore() has no free semaphores left
  class no_more_semaphores : public std::exception {
  public:
    no_more_semaphores() throw() : std::exception() {} //!< constructor
    virtual const char* what() const throw() { return "SemaphoreManager::getSemaphore() returned invalid()"; } //!< human-readable description of the failure
  };

  //! sets the SemaphoreManager which will hand out semaphores for any and all locks
  /*! see #preallocated for an explanation of why this function does what it does.
   *  Passing NULL copies the current manager's state back into #preallocated and
   *  switches to it (used while the real manager is being torn down); passing a
   *  manager copies the current state into it before switching over, so any
   *  semaphore ids handed out by #preallocated are carried forward. */
  static void setSemaphoreManager(SemaphoreManager* mgr) {
    if(mgr==NULL) {
      preallocated=*semgr;
      semgr=&preallocated;
    } else {
      *mgr=*semgr;
      semgr=mgr;
    }
  }
  //! returns the current global SemaphoreManager (may be #preallocated during process init or teardown)
  static SemaphoreManager* getSemaphoreManager() {
    return semgr;
  }
  //! notifies #preallocated that a fork is imminent (see SemaphoreManager::aboutToFork() for semantics)
  static void aboutToFork() {
    preallocated.aboutToFork();
  }

protected:
  //! the global semaphore manager object for all locks, may point to preallocated during process initialization or destruction
  static SemaphoreManager* semgr;

  //! if a semaphore needs to be reserved, and #semgr is NULL, use #preallocated's current value and increment it
  /*! Here's the conundrum: each shared region needs a lock, each
   *  lock needs an ID from the semaphore manager, and the semaphore
   *  manager needs to be in a shared region to coordinate handing out
   *  IDs.  So this is resolved by having the locks check #semgr to see
   *  if it is initialized yet, and use this if it is not.
   *  Then, when the SemaphoreManager is assigned, we will copy over
   *  preallocated IDs from here
   *
   *  For reference, only MutexLock needs to worry about this because
   *  it's the only thing that's going to need an ID before the
   *  manager is created.*/
  static SemaphoreManager preallocated;

  //! need to have a thread lock available for each semaphore -- have to acquire the corresponding inter-thread lock before you can acquire the inter-process lock
  static ThreadNS::Lock thdLocks[SemaphoreManager::MAX_SEM];
#endif
};
00087 
00088 
00089 
00090 #if !defined(PLATFORM_APERIOS) && !defined(MUTEX_LOCK_ET_USE_SOFTWARE_ONLY)
00091 #include "SemaphoreManager.h"
00092 
00093 //! Implements a mutual exclusion lock using semaphores (SYSV style through SemaphoreManager)
00094 /*! Use this to prevent more than one process from accessing a data structure
00095  *  at the same time (which often leads to unpredictable and unexpected results)
00096  *
00097  *  The template parameter specifies the maximum number of different processes
00098  *  which need to be protected.  This needs to be allocated ahead of time, as
00099  *  there doesn't seem to be a way to dynamically scale as needed without
00100  *  risking possible errors if two processes are both trying to set up at the
00101  *  same time.  Also, by using a template parameter, all data structures are
00102  *  contained within the class's memory allocation, so no pointers are involved.
00103  *
00104  *  Locks in this class can be recursive or non-recursive, depending
00105  *  whether you call releaseAll() or unlock().  If you lock 5 times, then
00106  *  you need to call unlock() 5 times as well before it will be
00107  *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
00108  *  will undo all 5 levels of locking.
00109  *
00110  *  Just remember, unlock() releases one level.  But releaseAll() completely unlocks.
00111  *
00112  *  Note that there is no check that the process doing the unlocking is the one
00113  *  that actually has the lock.  Be careful about this.
00114  *
00115  *  @warning Doing mutual exclusion in software is tricky business, be careful about any
00116  *  modifications you make!
00117  */
template<unsigned int num_doors>
class MutexLock : public MutexLockBase {
public:
  //! constructor, gets a new semaphore from the semaphore manager
  /*! @throws no_more_semaphores if the manager returned its invalid() marker
   *  (note: dereferences #semgr without a NULL check -- assumes the manager,
   *  or the #preallocated fallback, is already installed) */
  MutexLock()
    : sem(semgr->getSemaphore()), owner_index(NO_OWNER)
  {
    if(sem==semgr->invalid())
      throw no_more_semaphores();
    semgr->setValue(sem,0); // the semaphore's value doubles as the recursive lock level; 0 means unlocked
  }

  //! constructor, use this if you already have a semaphore id you want to use from semaphore manager
  /*! @param semid semaphore id previously obtained from the SemaphoreManager
   *  @throws no_more_semaphores if @a semid equals the manager's invalid() marker */
  MutexLock(SemaphoreManager::semid_t semid)
    : sem(semid), owner_index(NO_OWNER)
  {
    if(sem==semgr->invalid())
      throw no_more_semaphores();
    semgr->setValue(sem,0); // reset to unlocked
  }

  //! destructor, releases semaphore back to semaphore manager
  /*! If the manager is gone (or had a fault) the semaphore id is leaked and a
   *  warning is printed (the message says "NULL" but also covers the fault case).
   *  Any thread-lock levels still held on #thdLocks[sem] are fully unwound so
   *  other threads in this process are not left blocked. */
  ~MutexLock() {
    if(semgr!=NULL && !semgr->hadFault())
      semgr->releaseSemaphore(sem);
    else
      std::cerr << "Warning: MutexLock leaked semaphore " << sem << " because SemaphoreManager is NULL" << std::endl;
    if(owner_index!=NO_OWNER) {
      owner_index=NO_OWNER;
      while(thdLocks[sem].getLockLevel()>0)
        thdLocks[sem].unlock();
    }
  }

  //! blocks until lock is achieved.  This is done efficiently using a SysV style semaphore
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value. */
  void lock(int id) {
    // inter-thread lock must be held before the inter-process semaphore (see #thdLocks)
    thdLocks[sem].lock();
    if(owner_index!=static_cast<unsigned>(id)) {
      //have to wait and then claim lock
      if(semgr!=NULL && !semgr->hadFault()) {
        semgr->testZero_add(sem,1); // blocks until value is 0, then adds 1 to claim
      } else
        std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
      owner_index=id;
    } else {
      //we already have lock, add one to its lock level
      if(semgr!=NULL && !semgr->hadFault())
        semgr->raise(sem,1);
      else
        std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
    }
  }

  //! attempts to get a lock, returns true if it succeeds
  /*! You should pass some process-specific ID number as the input - just
   *  make sure no other process will be using the same value.*/
  bool try_lock(int id) {
    // thread-level trylock first; if another thread of this process holds it, give up immediately
    if(!thdLocks[sem].trylock())
      return false;
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming try_lock success of " << sem << " because SemaphoreManager is NULL" << std::endl;
      owner_index=id;
      return true;
    }
    if(owner()==id) {
      //we already have lock, add one to its lock level
      semgr->raise(sem,1);
      return true;
    } else {
      // non-blocking variant: claim only if the semaphore is currently 0
      if(semgr->testZero_add(sem,1,false)) {
        owner_index=id;
        return true;
      } else {
        thdLocks[sem].unlock(); // undo the thread-level lock taken above
        return false;
      }
    }
  }

  //! releases one recursive lock-level from whoever has the current lock
  /*! NOTE(review): getValue() followed by lower() is not an atomic pair;
   *  presumably safe because the caller holds both the thread lock and the
   *  semaphore itself -- confirm against SemaphoreManager's guarantees */
  inline void unlock() {
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming unlock of " << sem << " from " << owner_index << " because SemaphoreManager is NULL" << std::endl;
      owner_index=NO_OWNER;
      thdLocks[sem].unlock();
      return;
    }
    if(semgr->getValue(sem)<=0) {
      // more unlocks than locks -- complain and clear ownership rather than going negative
      std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
      owner_index=NO_OWNER;
      thdLocks[sem].unlock();
      return;
    }
    if(semgr->getValue(sem)==1)
      owner_index=NO_OWNER; // last level -- give up ownership before lowering
    if(!semgr->lower(sem,1,false))
      std::cerr << "Warning: MutexLock::unlock caused strange underflow" << std::endl;
    thdLocks[sem].unlock();
  }

  //! completely unlocks, regardless of how many times a recursive lock has been obtained
  void releaseAll() {
    owner_index=NO_OWNER;
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming releaseAll of " << sem << " because SemaphoreManager is NULL" << std::endl;
      return;
    }
    semgr->setValue(sem,0); // zero the semaphore in one shot
    while(thdLocks[sem].getLockLevel()>0)
      thdLocks[sem].unlock(); // unwind all thread-lock levels too
  }

  //! returns the lockcount
  /*! when the manager is unavailable, fall back to reporting 0 or 1 based on ownership */
  unsigned int get_lock_level() const {
    if(semgr==NULL || semgr->hadFault())
      return (owner_index==NO_OWNER) ? 0 : 1;
    else
      return semgr->getValue(sem);
  }

  //! returns the current owner's id (NO_OWNER if unlocked; note the unsigned-to-int conversion)
  inline int owner() const { return owner_index; }

protected:
  friend class MarkScope;
  //! Resource interface: acquire the lock on behalf of the calling process
  virtual void useResource(Resource::Data&) {
    lock(ProcessID::getID());
  }
  //! Resource interface: release one lock level
  virtual void releaseResource(Resource::Data&) {
    unlock();
  }

  SemaphoreManager::semid_t sem; //!< the SysV semaphore number
  unsigned int owner_index; //!< holds the tekkotsu process id of the current lock owner
};
00255 
00256 
00257 
00258 
00259 #else //SOFTWARE ONLY mutual exclusion, used on Aperios, or if MUTEX_LOCK_ET_USE_SOFTWARE_ONLY is defined
00260 
00261 
00262 //#define MUTEX_LOCK_ET_USE_SPINCOUNT
00263 
00264 // if DEBUG_MUTEX_LOCK is defined, we'll display information about each lock
00265 // access while the left front paw button is pressed
00266 //#define DEBUG_MUTEX_LOCK
00267 
00268 #ifdef DEBUG_MUTEX_LOCK
00269 #  include "Shared/WorldState.h"
00270 #endif
00271 
00272 //! A software only mutual exclusion lock. (does not depend on processor or OS support)
00273 /*! Use this to prevent more than one process from accessing a data structure
00274  *  at the same time (which often leads to unpredictable and unexpected results)
00275  *
00276  *  The template parameter specifies the maximum number of different processes
00277  *  which need to be protected.  This needs to be allocated ahead of time, as
00278  *  there doesn't seem to be a way to dynamically scale as needed without
00279  *  risking possible errors if two processes are both trying to set up at the
00280  *  same time.  Also, by using a template parameter, all data structures are
00281  *  contained within the class's memory allocation, so no pointers are involved.
00282  *
00283  *  Locks in this class can be recursive or non-recursive, depending
00284  *  whether you call releaseAll() or unlock().  If you lock 5 times, then
00285  *  you need to call unlock() 5 times as well before it will be
00286  *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
00287  *  will undo all 5 levels of locking.
00288  *
00289  *  Just remember, unlock() releases one level.  But releaseAll() completely unlocks.
00290  *
00291  *  Note that there is no check that the process doing the unlocking is the one
00292  *  that actually has the lock.  Be careful about this.
00293  *
00294  *  @warning Doing mutual exclusion in software is tricky business, be careful about any
00295  *  modifications you make!
00296  *
00297  * Implements a first-come-first-served Mutex as laid out on page 11 of: \n
00299  * "A First Come First Served Mutual Exclusion Algorithm with Small Communication Variables" \n
00299  * Edward A. Lycklama, Vassos Hadzilacos - Aug. 1991
00300 */
00301 template<unsigned int num_doors>
00302 class MutexLock : public MutexLockBase {
00303  public:
00304   //! constructor, just calls the init() function.
00305   MutexLock() : doors_used(0), owner_index(NO_OWNER), lockcount(0) { init();  }
00306 
00307 #ifndef PLATFORM_APERIOS
00308   //! destructor, re-enables thread cancelability if lock was held (non-aperios only)
00309   ~MutexLock() {
00310     if(owner_index!=NO_OWNER) {
00311       owner_index=NO_OWNER;
00312       thdLock[sem].unlock();
00313     }
00314   }
00315 #endif
00316 
00317   //! blocks (by busy looping on do_try_lock()) until a lock is achieved
00318   /*! You should pass some process-specific ID number as the input - just
00319    *  make sure no other process will be using the same value.
00320    *  @todo - I'd like to not use a loop here */
00321   void lock(int id);
00322 
00323   //! attempts to get a lock, returns true if it succeeds
00324   /*! You should pass some process-specific ID number as the input - just
00325    *  make sure no other process will be using the same value.*/
00326   bool try_lock(int id);
00327 
00328   //! releases one recursive lock-level from whoever has the current lock
00329   inline void unlock();
00330 
00331   //! completely unlocks, regardless of how many times a recursive lock has been obtained
00332   void releaseAll() { lockcount=1; unlock(); }
00333   
00334   //! returns the lockcount
00335   unsigned int get_lock_level() const { return lockcount; }
00336 
00337   //! returns the current owner's id
00338   inline int owner() const { return owner_index==NO_OWNER ? NO_OWNER : doors[owner_index].id; }
00339 
00340   //! allows you to reset one of the possible owners, so another process can take its place.  This is not tested
00341   void forget(int id);
00342 
00343 #ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
00344   inline unsigned int getSpincount() { return spincount; } //!< returns the number of times the spin() function has been called
00345   inline unsigned int resetSpincount() { spincount=0; } //!< resets the counter of the number of times the spin() function has been called
00346 #endif
00347   
00348  protected:
00349   friend class MarkScope;
00350   virtual void useResource(Resource::Data&) {
00351     lock(ProcessID::getID());
00352   }
00353   virtual void releaseResource(Resource::Data&) {
00354     unlock();
00355   }
00356   
00357   //! Does the work of trying to get a lock
00358   /*! Pass @c true for @a block if you want it to use FCFS blocking
00359    *  instead of just returning right away if another process has the lock */
00360   bool do_try_lock(unsigned int index, bool block);
00361 
00362   //! returns the internal index mapping to the id number supplied by the process
00363   unsigned int lookup(int id); //may create a new entry
00364 
00365 #ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
00366   volatile unsigned int spincount; //!< handy to track how much time we're wasting
00367   void init() { spincount=0; }//memset((void*)doors,0,sizeof(doors)); } //!< just resets spincount
00368   inline void spin() {
00369     spincount++;
00370 #ifndef PLATFORM_APERIOS
00371     usleep(usleep_granularity*10); //this is a carefully chosen value intended to solve all the world's problems (not)
00372 #endif
00373   } //!< if you find a way to sleep for a few microseconds instead of busy waiting, put it here
00374 #else
00375   void init() { } //!< Doesn't do anything if you have the MUTEX_LOCK_ET_USE_SPINCOUNT undef'ed.  Used to do a memset, but that was causing problems....
00376   //memset((void*)doors,0,sizeof(doors)); } 
00377   inline void spin() {
00378 #ifndef PLATFORM_APERIOS
00379     usleep(usleep_granularity*10); //this is a carefully chosen value intended to solve all the world's problems (not)
00380 #endif
00381   } //!< If you find a way to sleep for a few microseconds instead of busy waiting, put it here
00382 #endif
00383     
00384   //! Holds per process shared info, one of these per process
00385   struct door_t {
00386     door_t() : id(NO_OWNER), FCFS_in_use(false), BL_ready(false), BL_in_use(false), turn('\0'), next_turn_bit('\0') {} //!< constructor
00387     //door_t(int i) : id(i), FCFS_in_use(false), BL_ready(false), BL_in_use(false), next_turn_bit('\0') {}
00388     int id; //!< process ID this doorway is assigned to
00389     volatile bool FCFS_in_use; //!< In FCFS doorway, corresponds to 'c_i'
00390     volatile bool BL_ready; //!< Signals past FCFS doorway, ready for BL doorway, corresponds to 'v_i'
00391     volatile bool BL_in_use; //!< Burns-Lamport doorway, corresponds to 'x_i'
00392     volatile unsigned char turn; //!< clock pulse, initial value doesn't matter
00393     unsigned char next_turn_bit; //!< selects which bit of turn will be flipped next
00394   };
00395 
00396   door_t doors[num_doors]; //!< holds all the doors
00397   unsigned int doors_used; //!< counts the number of doors used
00398   unsigned int owner_index; //!< holds the door index of the current lock owner
00399   unsigned int lockcount; //!< the depth of the lock, 0 when unlocked
00400 };
00401 
00402 
00403 template<unsigned int num_doors>
00404 void
00405 MutexLock<num_doors>::lock(int id) {
00406 #ifndef PLATFORM_APERIOS
00407   thdLock[sem].lock();
00408 #endif
00409   if(owner()!=id) {
00410     if(!do_try_lock(lookup(id),true)) {
00411       //spin(); //note the block argument above -- should never spin if that is actually working
00412       std::cout << "Warning: lock() failed to achieve lock" << std::endl;
00413     }
00414   } else {
00415 #ifdef DEBUG_MUTEX_LOCK
00416     if(state==NULL || state->buttons[LFrPawOffset])
00417       std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
00418 #endif
00419   }
00420   lockcount++;
00421 }
00422 
00423 
00424 template<unsigned int num_doors>
00425 bool
00426 MutexLock<num_doors>::try_lock(int id) {
00427 #ifndef PLATFORM_APERIOS
00428   if(!thdLock[sem].trylock())
00429     return false;
00430 #endif
00431   if(owner()==id) {
00432 #ifdef DEBUG_MUTEX_LOCK
00433     if(state==NULL || state->buttons[LFrPawOffset])
00434       std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
00435 #endif
00436     lockcount++;
00437     return true;
00438   } else {
00439     if(do_try_lock(lookup(id),false)) {
00440       lockcount++;
00441       return true;
00442     } else {
00443 #ifndef PLATFORM_APERIOS
00444       thdLock[sem].unlock())
00445 #endif
00446       return false;
00447     }
00448   }
00449 }
00450 
00451 
00452 template<unsigned int num_doors>
00453 void
00454 MutexLock<num_doors>::unlock() {
00455   if(lockcount==0) {
00456     std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
00457     return;
00458   }
00459 #ifdef DEBUG_MUTEX_LOCK
00460   if(state==NULL || state->buttons[LFrPawOffset])
00461     std::cerr << doors[owner_index].id << " unlock " << this << " level "<< lockcount << std::endl;
00462 #endif
00463   if(--lockcount==0) {
00464     if(owner_index!=NO_OWNER) {
00465       unsigned int tmp = owner_index;
00466       owner_index=NO_OWNER;
00467       doors[tmp].BL_in_use=false;
00468       doors[tmp].BL_ready=false;
00469       // *** Lock has been released *** //
00470 #ifndef PLATFORM_APERIOS
00471       if(owner_index==id) {
00472         thdLock[sem].unlock();
00473       }
00474 #endif
00475     }
00476   }
00477 }
00478 
00479 
00480 //! If you define this to do something more interesting, can use it to see what's going on in the locking process
00481 //#define mutexdebugout(i,c) { std::cout << ((char)(i==0?c:((i==1?'M':'a')+(c-'A')))) << std::flush; }
00482 
00483 
//! Does the work of acquiring the lock: FCFS doorway followed by Burns-Lamport mutual exclusion
/*! @param i the caller's door index from lookup(); NO_OWNER means lookup() ran out of doors
 *  @param block if true, busy-wait (via spin()) until the lock is won; if false, withdraw and return false on any contention
 *  @return true once the lock is held (owner_index set to @a i), false on failure */
template<unsigned int num_doors>
bool
MutexLock<num_doors>::do_try_lock(unsigned int i, bool block) {
  if(i==NO_OWNER) {
    // lookup() could not assign a door -- more distinct ids than num_doors
    std::cerr << "WARNING: new process attempted to lock beyond num_doors ("<<num_doors<<")" << std::endl;
    return false;
  }
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[i].id << " attempting lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif  
  unsigned char S[num_doors]; // a local copy of everyone's doors
  // *** Entering FCFS doorway *** //
  doors[i].FCFS_in_use=true; // announce we are mid-doorway ('c_i' in the paper)
  for(unsigned int j=0; j<num_doors; j++)
    S[j]=doors[j].turn; // snapshot everyone's clock pulse
  doors[i].next_turn_bit=1-doors[i].next_turn_bit; // alternate between bit 0 and bit 1
  doors[i].turn^=(1<<doors[i].next_turn_bit); // flip that bit of our own pulse
  doors[i].BL_ready=true; // past the doorway, now contending ('v_i')
  doors[i].FCFS_in_use=false;
  // *** Leaving FCFS doorway *** //
  // wait for every contender whose pulse still matches our snapshot (they entered before us),
  // and for anyone currently mid-doorway
  for(unsigned int j=0; j<num_doors; j++) {
    while(doors[j].FCFS_in_use || (doors[j].BL_ready && S[j]==doors[j].turn))
      if(block)
        spin();
      else {
        doors[i].BL_ready=false; // withdraw from contention
#ifdef DEBUG_MUTEX_LOCK
        if(state==NULL || state->buttons[LFrPawOffset])
          std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif  
        return false;
      }
  }
  // *** Entering Burns-Lamport *** //
  // Burns-Lamport among remaining contenders: lower indices win ties
  do {
    doors[i].BL_in_use=true;
    for(unsigned int t=0; t<i; t++)
      if(doors[t].BL_in_use) {
        // a lower-indexed contender is in -- back off (and either give up or wait it out)
        doors[i].BL_in_use=false;
        if(!block) {
          doors[i].BL_ready=false;
#ifdef DEBUG_MUTEX_LOCK
          if(state==NULL || state->buttons[LFrPawOffset])
            std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif  
          return false;
        }
        while(doors[t].BL_in_use)
          spin();
        break;
      }
  } while(!doors[i].BL_in_use);
  // wait for any higher-indexed contender already past its check to finish
  for(unsigned int t=i+1; t<num_doors; t++)
    while(doors[t].BL_in_use)
      spin();
  // *** Leaving Burns-Lamport ***//
  // *** Lock has been given *** //
  owner_index=i;
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[i].id << " received lock " << this << " at " << get_time() << std::endl;
#endif  
  return true;
}
00549 
00550 
00551 template<unsigned int num_doors>
00552 unsigned int
00553 MutexLock<num_doors>::lookup(int id) {
00554   // TODO - this could break if two new processes are adding themselves at the same time
00555   //        or an id is being forgotten at the same time
00556   //I'm expecting a very small number of processes to be involved
00557   //probably not worth overhead of doing something fancy like a sorted array
00558   unsigned int i;
00559   for(i=0; i<doors_used; i++)
00560     if(doors[i].id==id)
00561       return i;
00562   if(i==num_doors)
00563     return NO_OWNER;
00564   doors[i].id=id;
00565   doors_used++;
00566   return i;
00567 }
00568 
00569 
//! removes @a id's door so another process can take its place
/*! Acquires the lock first so nobody is mid-doorway while entries are shuffled,
 *  then compacts the door array and releases everything.
 *  NOTE(review): if @a id is unknown and all doors are full, lookup() returns
 *  NO_OWNER and do_try_lock() fails -- but the array shuffle below would then
 *  index doors[NO_OWNER]; confirm callers only pass registered ids. */
template<unsigned int num_doors>
void
MutexLock<num_doors>::forget(int id) { //not tested thoroughly (or at all?)
  unsigned int i = lookup(id);
  do_try_lock(i,true); // hold the lock while editing the door table
  doors[i].id=doors[--doors_used].id; // move the last used door into the vacated slot
  doors[doors_used].id=NO_OWNER; // NOTE(review): unsigned NO_OWNER stored into int id (becomes -1)
  releaseAll(); // drop the lock we just took (and any prior levels)
}
00579 
00580 #endif //MUTEX_LOCK_ET_USE_SOFTWARE_ONLY
00581 
00582 /*! @file 
00583  * @brief Defines MutexLock, a software only mutual exclusion lock.
00584  * @author ejt (Creator), Edward A. Lycklama, Vassos Hadzilacos (paper from which this was based)
00585  *
00586  * $Author: ejt $
00587  * $Name: tekkotsu-3_0 $
00588  * $Revision: 1.16 $
00589  * $State: Exp $
00590  * $Date: 2006/08/11 21:51:40 $
00591  */
00592 
00593 #endif

Tekkotsu v3.0
Generated Wed Oct 4 00:03:44 2006 by Doxygen 1.4.7