Tekkotsu Homepage
Demos
Overview
Downloads
Dev. Resources
Reference
Credits

MutexLock.h

Go to the documentation of this file.
00001 //-*-c++-*-
00002 #ifndef __MUTEX_LOCK_ET__
00003 #define __MUTEX_LOCK_ET__
00004 
00005 #include "Shared/Resource.h"
00006 #include "ProcessID.h"
00007 #include <iostream>
00008 #include <exception>
00009 #include <typeinfo>
00010 
00011 #ifndef PLATFORM_APERIOS
00012 #  include "SemaphoreManager.h"
00013 #  if !defined(MUTEX_LOCK_ET_USE_SOFTWARE_ONLY) && TEKKOTSU_SHM_STYLE==NO_SHM
00014 #    include "Thread.h"
00015 #  endif
00016 #  include "RCRegion.h"
00017 #  include <unistd.h>
00018 #  include <pthread.h>
00019 #endif
00020 
00021 // If you want to use the same software-only lock on both
00022 // PLATFORM_LOCAL and Aperios, then uncomment this next line:
00023 //#define MUTEX_LOCK_ET_USE_SOFTWARE_ONLY
00024 
00025 // However, that's probably only of use if you want to debug a problem with the lock itself
00026 
00027 
00028 
00029 //! The main purpose of this base class is actually to allow setting of usleep_granularity across all locks
00030 /*! It would be nice if we just put functions in here so we could
00031  *  reference locks without regard to the number of doors, but
00032  *  then all processes which use the lock would have to have been
00033  *  created via fork to handle virtual calls properly, and I don't
00034  *  want to put that overhead on the otherwise lightweight SoundPlay
00035  *  process under Aperios. */
00036 class MutexLockBase : public Resource {
00037 public:
00038   virtual ~MutexLockBase() {} //!< basic destructor
00039   
00040   static const unsigned int NO_OWNER=-1U; //!< marks as unlocked
00041   static unsigned int usleep_granularity; //!< the estimated cost in microseconds of usleep call itself -- value passed to usleep will be 10 times this (only used by software lock implementation on non-Aperios)
00042 
00043   //This section is only needed for the non-software-only locks, using the SemaphoreManager
00044 #if !defined(PLATFORM_APERIOS) && TEKKOTSU_SHM_STYLE!=NO_SHM
00045   //! exception if a lock is created but there aren't any more semaphores available
00046   class no_more_semaphores : public std::exception {
00047   public:
00048     //! constructor
00049     no_more_semaphores() throw() : std::exception() {}
00050     //! returns a constant message string
00051     virtual const char* what() const throw() { return "SemaphoreManager::getSemaphore() returned invalid()"; }
00052   };
00053   
00054   //! sets the SemaphoreManager which will hand out semaphores for any and all locks
00055   /*! see #preallocated for an explanation of why this function does what it does */
00056   static void setSemaphoreManager(SemaphoreManager* mgr) {
00057     if(mgr==NULL) { // manager going away: snapshot its state into #preallocated and fall back on that
00058       preallocated=*semgr;
00059       semgr=&preallocated;
00060     } else { // new manager: seed it from the current one (possibly #preallocated) so IDs reserved so far carry over
00061       *mgr=*semgr;
00062       semgr=mgr;
00063     }
00064   }
00065   //! returns #semgr;
00066   static SemaphoreManager* getSemaphoreManager() {
00067     return semgr;
00068   }
00069   //! this should be called if a fork() is about to occur (need to pass notification on to #preallocated)
00070   static void aboutToFork() {
00071     preallocated.aboutToFork();
00072   }
00073   
00074 protected:
00075   //! the global semaphore manager object for all locks, may point to preallocated during process initialization or destruction
00076   static SemaphoreManager* semgr;
00077   
00078   //! if a semaphore needs to be reserved, and #semgr is NULL, use #preallocated's current value and increment it
00079   /*! Here's the conundrum: each shared region needs a lock, each
00080    *  lock needs an ID from the semaphore manager, and the semaphore
00081    *  manager needs to be in a shared region to coordinate handing out
00082    *  IDs.  So this is resolved by having the locks check #semgr to see
00083    *  if it is initialized yet, and use this if it is not.
00084    *  Then, when the SemaphoreManager is assigned, we will copy over
00085    *  preallocated IDs from here
00086    *
00087    *  For reference, only MutexLock needs to worry about this because
00088    *  it's the only thing that's going to need an ID before the
00089    *  manager is created.*/
00090   static SemaphoreManager preallocated;
00091   
00092 #elif TEKKOTSU_SHM_STYLE==NO_SHM
00093 public:
00094   static void aboutToFork() {} //!< no-op: without shared memory there is no #preallocated state to hand off
00095 #endif
00096 };
00097 
00098 
00099 
00100 #if !defined(PLATFORM_APERIOS) && !defined(MUTEX_LOCK_ET_USE_SOFTWARE_ONLY)
00101 #if TEKKOTSU_SHM_STYLE==NO_SHM
00102 
00103 //! Implements a mutual exclusion lock using pthread mutex
00104 /*! Use this to prevent more than one thread from accessing a data structure
00105 *  at the same time (which often leads to unpredictable and unexpected results)
00106 *
00107 *  The template parameter is not used (only needed if compiling with IPC enabled)
00108 *
00109 *  Locks in this class can be recursive or non-recursive, depending
00110 *  whether you call releaseAll() or unlock().  If you lock 5 times, then
00111 *  you need to call unlock() 5 times as well before it will be
00112 *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
00113 *  will undo all 5 levels of locking.
00114 *
00115 *  Just remember, unlock() releases one level.  But releaseAll() completely unlocks.
00116 *
00117 *  Note that there is no check that the thread doing the unlocking is the one
00118 *  that actually has the lock.  Be careful about this.
00119 */
00120 template<unsigned int num_doors>
00121 class MutexLock : public MutexLockBase {
00122 public:
00123   //! constructor, gets a new semaphore from the semaphore manager
00124   MutexLock() : owner_index(NO_OWNER), thdLock() {}
00125   
00126   //! destructor, releases semaphore back to semaphore manager
00127   ~MutexLock() {
00128     if(owner_index!=NO_OWNER) {
00129       owner_index=NO_OWNER;
00130       while(thdLock.getLockLevel()>0)
00131         thdLock.unlock();
00132     }
00133   }
00134   
00135   //! blocks until lock is achieved.  This is done efficiently using a SysV style semaphore
00136   /*! You should pass some process-specific ID number as the input - just
00137    *  make sure no other process will be using the same value. */
00138   void lock(int id) {
00139     thdLock.lock();
00140     if(owner_index!=static_cast<unsigned>(id))
00141       owner_index=id;
00142   }
00143   
00144   //! attempts to get a lock, returns true if it succeeds
00145   /*! You should pass some process-specific ID number as the input - just
00146    *  make sure no other process will be using the same value.*/
00147   bool try_lock(int id) {
00148     if(!thdLock.trylock())
00149       return false;
00150     owner_index=id;
00151     return true;
00152   }
00153   
00154   //! releases one recursive lock-level from whoever has the current lock
00155   inline void unlock() {
00156     if(thdLock.getLockLevel()<=0)
00157       std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
00158     if(thdLock.getLockLevel()<=1)
00159       owner_index=NO_OWNER;
00160     thdLock.unlock();
00161   }
00162   
00163   //! completely unlocks, regardless of how many times a recursive lock has been obtained
00164   void releaseAll() {
00165     owner_index=NO_OWNER;
00166     while(thdLock.getLockLevel()>0)
00167       thdLock.unlock();
00168   }
00169   
00170   //! returns the lockcount
00171   unsigned int get_lock_level() const { return thdLock.getLockLevel(); }
00172   
00173   //! returns the current owner's id
00174   inline int owner() const { return owner_index; }
00175   
00176 protected:
00177   friend class MarkScope;
00178   virtual void useResource(Resource::Data&) { lock(ProcessID::getID()); }
00179   virtual void releaseResource(Resource::Data&) { unlock(); }
00180   
00181   unsigned int owner_index; //!< holds the tekkotsu process id of the current lock owner
00182   ThreadNS::Lock thdLock; //!< all the actual implementation is handed off to the thread lock
00183 };
00184 
00185 #else /* IPC Lock using Semaphores*/
00186 #  include "SemaphoreManager.h"
00187 
00188 //! Implements a mutual exclusion lock using semaphores (SYSV style through SemaphoreManager)
00189 /*! Use this to prevent more than one process from accessing a data structure
00190  *  at the same time (which often leads to unpredictable and unexpected results)
00191  *
00192  *  The template parameter specifies the maximum number of different processes
00193  *  which need to be protected.  This needs to be allocated ahead of time, as
00194  *  there doesn't seem to be a way to dynamically scale as needed without
00195  *  risking possible errors if two processes are both trying to set up at the
00196  *  same time.  Also, by using a template parameter, all data structures are
00197  *  contained within the class's memory allocation, so no pointers are involved.
00198  *
00199  *  Locks in this class can be recursive or non-recursive, depending
00200  *  whether you call releaseAll() or unlock().  If you lock 5 times, then
00201  *  you need to call unlock() 5 times as well before it will be
00202  *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
00203  *  will undo all 5 levels of locking.
00204  *
00205  *  Just remember, unlock() releases one level.  But releaseAll() completely unlocks.
00206  *
00207  *  Note that there is no check that the process doing the unlocking is the one
00208  *  that actually has the lock.  Be careful about this.
00209  *
00210  *  @warning Doing mutual exclusion in software is tricky business, be careful about any
00211  *  modifications you make!
00212  */
00213 template<unsigned int num_doors>
00214 class MutexLock : public MutexLockBase {
00215 public:
00216   //! constructor, gets a new semaphore from the semaphore manager
00217   MutexLock() : MutexLockBase(), 
00218     sem(semgr->getSemaphore()), owner_index(NO_OWNER), owner_thread()
00219   {
00220     if(sem==semgr->invalid())
00221       throw no_more_semaphores();
00222     semgr->setValue(sem,0); // 0 == unlocked; lock() waits for the count to return to 0
00223   }
00224   
00225   //! constructor, use this if you already have a semaphore id you want to use from semaphore manager
00226   MutexLock(SemaphoreManager::semid_t semid) : MutexLockBase(), 
00227     sem(semid), owner_index(NO_OWNER), owner_thread()
00228   {
00229     if(sem==semgr->invalid())
00230       throw no_more_semaphores();
00231     semgr->setValue(sem,0); // 0 == unlocked; lock() waits for the count to return to 0
00232   }
00233   
00234   //! destructor, releases semaphore back to semaphore manager
00235   ~MutexLock() {
00236     owner_index=NO_OWNER;
00237     if(semgr!=NULL && !semgr->hadFault())
00238       semgr->releaseSemaphore(sem);
00239     else
00240       std::cerr << "Warning: MutexLock leaked semaphore " << sem << " because SemaphoreManager is NULL" << std::endl; // NOTE(review): also reached when the manager had a fault, not only when NULL
00241   }
00242   
00243   //! blocks until lock is achieved.  This is done efficiently using a SysV style semaphore
00244   /*! You should pass some process-specific ID number as the input - just
00245    *  make sure no other process will be using the same value. */
00246   void lock(int id) {
00247     if(owner_index!=static_cast<unsigned>(id) || !isOwnerThread()) { // not a recursive lock by this same thread
00248       //have to wait and then claim lock
00249       if(semgr!=NULL && !semgr->hadFault()) {
00250         semgr->testZero_add(sem,1); // presumably blocks until the count is 0, then atomically adds 1 -- confirm in SemaphoreManager
00251       } else
00252         std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
00253       owner_index=id;
00254       owner_thread=pthread_self(); // remember the claiming thread; recursion is per-thread
00255     } else {
00256       //we already have lock, add one to its lock level
00257       if(semgr!=NULL && !semgr->hadFault())
00258         semgr->raise(sem,1); // bump the recursion count
00259       else
00260         std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
00261     }
00262   }
00263   
00264   //! attempts to get a lock, returns true if it succeeds
00265   /*! You should pass some process-specific ID number as the input - just
00266    *  make sure no other process will be using the same value.*/
00267   bool try_lock(int id) {
00268     if(semgr==NULL || semgr->hadFault()) {
00269       std::cerr << "Warning: MutexLock assuming try_lock success of " << sem << " because SemaphoreManager is NULL" << std::endl;
00270       owner_index=id;
00271       return true;
00272     }
00273     if(owner()==id && isOwnerThread()) {
00274       //we already have lock, add one to its lock level
00275       semgr->raise(sem,1);
00276       return true;
00277     } else {
00278       if(semgr->testZero_add(sem,1,false)) { // third arg false presumably means "don't block" -- confirm in SemaphoreManager
00279         owner_index=id;
00280         owner_thread=pthread_self();
00281         return true;
00282       } else {
00283         return false;
00284       }
00285     }
00286   }
00287   
00288   //! releases one recursive lock-level from whoever has the current lock
00289   inline void unlock() {
00290     if(semgr==NULL || semgr->hadFault()) {
00291       std::cerr << "Warning: MutexLock assuming unlock of " << sem << " from " << owner_index << " because SemaphoreManager is NULL" << std::endl;
00292       owner_index=NO_OWNER;
00293       return;
00294     }
00295     if(semgr->getValue(sem)<=0) { // NOTE(review): getValue/lower are separate calls (check-then-act); appears benign since only the holder should call unlock()
00296       std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
00297       owner_index=NO_OWNER;
00298       return;
00299     }
00300     if(semgr->getValue(sem)==1) // the last recursion level is about to be released
00301       owner_index=NO_OWNER;
00302     if(!semgr->lower(sem,1,false))
00303       std::cerr << "Warning: MutexLock::unlock caused strange underflow" << std::endl;
00304   }
00305   
00306   //! completely unlocks, regardless of how many times a recursive lock has been obtained
00307   void releaseAll() {
00308     owner_index=NO_OWNER;
00309     if(semgr==NULL || semgr->hadFault()) {
00310       std::cerr << "Warning: MutexLock assuming releaseAll of " << sem << " because SemaphoreManager is NULL" << std::endl;
00311       return;
00312     }
00313     semgr->setValue(sem,0); // drop the count straight back to "unlocked"
00314   }
00315   
00316   //! returns the lockcount
00317   unsigned int get_lock_level() const {
00318     if(semgr==NULL || semgr->hadFault())
00319       return (owner_index==NO_OWNER) ? 0 : 1; // best guess without a working manager
00320     else
00321       return semgr->getValue(sem);
00322   }
00323   
00324   //! returns the current owner's id
00325   inline int owner() const { return owner_index; }
00326   
00327 protected:
00328   //! returns true if the current thread is the one which owns the lock
00329   bool isOwnerThread() {
00330     pthread_t cur=pthread_self();
00331     return pthread_equal(cur,owner_thread);
00332   }
00333   friend class MarkScope;
00334   virtual void useResource(Resource::Data&) {
00335     lock(ProcessID::getID());
00336   }
00337   virtual void releaseResource(Resource::Data&) {
00338     unlock();
00339   }
00340   
00341   SemaphoreManager::semid_t sem; //!< the SysV semaphore number
00342   unsigned int owner_index; //!< holds the tekkotsu process id of the current lock owner
00343   pthread_t owner_thread; //!< holds a thread id for the owner thread
00344   
00345 private:
00346   MutexLock(const MutexLock& ml); //!< copy constructor, do not call
00347   MutexLock& operator=(const MutexLock& ml); //!< assignment, do not call
00348 };
00349 
00350 
00351 
00352 
00353 #endif /* uni-process or (potentially) multi-process lock? */
00354 #else //SOFTWARE ONLY mutual exclusion, used on Aperios, or if MUTEX_LOCK_ET_USE_SOFTWARE_ONLY is defined
00355 
00356 
00357 //#define MUTEX_LOCK_ET_USE_SPINCOUNT
00358 
00359 // if DEBUG_MUTEX_LOCK is defined, we'll display information about each lock
00360 // access while the left front paw button is pressed
00361 //#define DEBUG_MUTEX_LOCK
00362 
00363 #ifdef DEBUG_MUTEX_LOCK
00364 #  include "Shared/WorldState.h"
00365 #endif
00366 
00367 //! A software only mutual exclusion lock. (does not depend on processor or OS support)
00368 /*! Use this to prevent more than one process from accessing a data structure
00369  *  at the same time (which often leads to unpredictable and unexpected results)
00370  *
00371  *  The template parameter specifies the maximum number of different processes
00372  *  which need to be protected.  This needs to be allocated ahead of time, as
00373  *  there doesn't seem to be a way to dynamically scale as needed without
00374  *  risking possible errors if two processes are both trying to set up at the
00375  *  same time.  Also, by using a template parameter, all data structures are
00376  *  contained within the class's memory allocation, so no pointers are involved.
00377  *
00378  *  Locks in this class can be recursive or non-recursive, depending
00379  *  whether you call releaseAll() or unlock().  If you lock 5 times, then
00380  *  you need to call unlock() 5 times as well before it will be
00381  *  unlocked.  However, if you lock 5 times, just one call to releaseAll()
00382  *  will undo all 5 levels of locking.
00383  *
00384  *  Just remember, unlock() releases one level.  But releaseAll() completely unlocks.
00385  *
00386  *  Note that there is no check that the process doing the unlocking is the one
00387  *  that actually has the lock.  Be careful about this.
00388  *
00389  *  @warning Doing mutual exclusion in software is tricky business, be careful about any
00390  *  modifications you make!
00391  *
00392  * Implements a first-come-first-served Mutex as laid out on page 11 of: \n
00393  * "A First Come First Served Mutual Exclusion Algorithm with Small Communication Variables" \n
00394  * Edward A. Lycklama, Vassos Hadzilacos - Aug. 1991
00395 */
00396 template<unsigned int num_doors>
00397 class MutexLock : public MutexLockBase {
00398  public:
00399   //! constructor, just calls the init() function.
00400   MutexLock() : doors_used(0), owner_index(NO_OWNER), lockcount(0) { init();  }
00401 
00402 #ifndef PLATFORM_APERIOS
00403   //! destructor, re-enables thread cancelability if lock was held (non-aperios only)
00404   ~MutexLock() {
00405     if(owner_index!=NO_OWNER) {
00406       owner_index=NO_OWNER;
00407       thdLock[sem].unlock();
00408     }
00409   }
00410 #endif
00411 
00412   //! blocks (by busy looping on do_try_lock()) until a lock is achieved
00413   /*! You should pass some process-specific ID number as the input - just
00414    *  make sure no other process will be using the same value.
00415    *  @todo - I'd like to not use a loop here */
00416   void lock(int id);
00417 
00418   //! attempts to get a lock, returns true if it succeeds
00419   /*! You should pass some process-specific ID number as the input - just
00420    *  make sure no other process will be using the same value.*/
00421   bool try_lock(int id);
00422 
00423   //! releases one recursive lock-level from whoever has the current lock
00424   inline void unlock();
00425 
00426   //! completely unlocks, regardless of how many times a recursive lock has been obtained
00427   void releaseAll() { lockcount=1; unlock(); }
00428   
00429   //! returns the lockcount
00430   unsigned int get_lock_level() const { return lockcount; }
00431 
00432   //! returns the current owner's id
00433   inline int owner() const { return owner_index==NO_OWNER ? NO_OWNER : doors[owner_index].id; }
00434 
00435   //! allows you to reset one of the possible owners, so another process can take its place.  This is not tested
00436   void forget(int id);
00437 
00438 #ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
00439   inline unsigned int getSpincount() { return spincount; } //!< returns the number of times the spin() function has been called
00440   inline unsigned int resetSpincount() { spincount=0; } //!< resets the counter of the number of times the spin() function has been called
00441 #endif
00442   
00443  protected:
00444   friend class MarkScope;
00445   virtual void useResource(Resource::Data&) {
00446     lock(ProcessID::getID());
00447   }
00448   virtual void releaseResource(Resource::Data&) {
00449     unlock();
00450   }
00451   
00452   //! Does the work of trying to get a lock
00453   /*! Pass @c true for @a block if you want it to use FCFS blocking
00454    *  instead of just returning right away if another process has the lock */
00455   bool do_try_lock(unsigned int index, bool block);
00456 
00457   //! returns the internal index mapping to the id number supplied by the process
00458   unsigned int lookup(int id); //may create a new entry
00459 
00460 #ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
00461   volatile unsigned int spincount; //!< handy to track how much time we're wasting
00462   void init() { spincount=0; }//memset((void*)doors,0,sizeof(doors)); } //!< just resets spincount
00463   inline void spin() {
00464     spincount++;
00465 #ifndef PLATFORM_APERIOS
00466     usleep(usleep_granularity*10); //this is a carefully chosen value intended to solve all the world's problems (not)
00467 #endif
00468   } //!< if you find a way to sleep for a few microseconds instead of busy waiting, put it here
00469 #else
00470   void init() { } //!< Doesn't do anything if you have the MUTEX_LOCK_ET_USE_SPINCOUNT undef'ed.  Used to do a memset, but that was causing problems....
00471   //memset((void*)doors,0,sizeof(doors)); } 
00472   inline void spin() {
00473 #ifndef PLATFORM_APERIOS
00474     usleep(usleep_granularity*10); //this is a carefully chosen value intended to solve all the world's problems (not)
00475 #endif
00476   } //!< If you find a way to sleep for a few microseconds instead of busy waiting, put it here
00477 #endif
00478     
00479   //! Holds per process shared info, one of these per process
00480   struct door_t {
00481     door_t() : id(NO_OWNER), FCFS_in_use(false), BL_ready(false), BL_in_use(false), turn('\0'), next_turn_bit('\0') {} //!< constructor
00482     //door_t(int i) : id(i), FCFS_in_use(false), BL_ready(false), BL_in_use(false), next_turn_bit('\0') {}
00483     int id; //!< process ID this doorway is assigned to
00484     volatile bool FCFS_in_use; //!< In FCFS doorway, corresponds to 'c_i'
00485     volatile bool BL_ready; //!< Signals past FCFS doorway, ready for BL doorway, corresponds to 'v_i'
00486     volatile bool BL_in_use; //!< Burns-Lamport doorway, corresponds to 'x_i'
00487     volatile unsigned char turn; //!< clock pulse, initial value doesn't matter
00488     unsigned char next_turn_bit; //!< selects which bit of turn will be flipped next
00489   };
00490 
00491   door_t doors[num_doors]; //!< holds all the doors
00492   unsigned int doors_used; //!< counts the number of doors used
00493   unsigned int owner_index; //!< holds the door index of the current lock owner
00494   unsigned int lockcount; //!< the depth of the lock, 0 when unlocked
00495 };
00496 
00497 
00498 template<unsigned int num_doors>
00499 void
00500 MutexLock<num_doors>::lock(int id) {
00501 #ifndef PLATFORM_APERIOS
00502   thdLock[sem].lock();
00503 #endif
00504   if(owner()!=id) {
00505     if(!do_try_lock(lookup(id),true)) {
00506       //spin(); //note the block argument above -- should never spin if that is actually working
00507       std::cout << "Warning: lock() failed to achieve lock" << std::endl;
00508     }
00509   } else {
00510 #ifdef DEBUG_MUTEX_LOCK
00511     if(state==NULL || state->buttons[LFrPawOffset])
00512       std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
00513 #endif
00514   }
00515   lockcount++;
00516 }
00517 
00518 
00519 template<unsigned int num_doors>
00520 bool
00521 MutexLock<num_doors>::try_lock(int id) {
00522 #ifndef PLATFORM_APERIOS
00523   if(!thdLock[sem].trylock())
00524     return false;
00525 #endif
00526   if(owner()==id) {
00527 #ifdef DEBUG_MUTEX_LOCK
00528     if(state==NULL || state->buttons[LFrPawOffset])
00529       std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
00530 #endif
00531     lockcount++;
00532     return true;
00533   } else {
00534     if(do_try_lock(lookup(id),false)) {
00535       lockcount++;
00536       return true;
00537     } else {
00538 #ifndef PLATFORM_APERIOS
00539       thdLock[sem].unlock())
00540 #endif
00541       return false;
00542     }
00543   }
00544 }
00545 
00546 
00547 template<unsigned int num_doors>
00548 void
00549 MutexLock<num_doors>::unlock() {
00550   if(lockcount==0) {
00551     std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
00552     return;
00553   }
00554 #ifdef DEBUG_MUTEX_LOCK
00555   if(state==NULL || state->buttons[LFrPawOffset])
00556     std::cerr << doors[owner_index].id << " unlock " << this << " level "<< lockcount << std::endl;
00557 #endif
00558   if(--lockcount==0) {
00559     if(owner_index!=NO_OWNER) {
00560       unsigned int tmp = owner_index;
00561       owner_index=NO_OWNER;
00562       doors[tmp].BL_in_use=false;
00563       doors[tmp].BL_ready=false;
00564       // *** Lock has been released *** //
00565 #ifndef PLATFORM_APERIOS
00566       if(owner_index==id) {
00567         thdLock[sem].unlock();
00568       }
00569 #endif
00570     }
00571   }
00572 }
00573 
00574 
00575 //! If you define this to do something more interesting, can use it to see what's going on in the locking process
00576 //#define mutexdebugout(i,c) { std::cout << ((char)(i==0?c:((i==1?'M':'a')+(c-'A')))) << std::flush; }
00577 
00578 
00579 template<unsigned int num_doors>
00580 bool
00581 MutexLock<num_doors>::do_try_lock(unsigned int i, bool block) {
00582   if(i==NO_OWNER) {
00583     std::cerr << "WARNING: new process attempted to lock beyond num_doors ("<<num_doors<<")" << std::endl;
00584     return false;
00585   }
00586 #ifdef DEBUG_MUTEX_LOCK
00587   if(state==NULL || state->buttons[LFrPawOffset])
00588     std::cerr << doors[i].id << " attempting lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
00589 #endif  
00590   unsigned char S[num_doors]; // a local copy of everyone's doors
00591   // *** Entering FCFS doorway *** //
00592   doors[i].FCFS_in_use=true;
00593   for(unsigned int j=0; j<num_doors; j++)
00594     S[j]=doors[j].turn; // snapshot each door's turn value ('S_j' in the paper)
00595   doors[i].next_turn_bit=1-doors[i].next_turn_bit; // alternate which of the two low bits is flipped
00596   doors[i].turn^=(1<<doors[i].next_turn_bit); // 'pulse' our turn so later arrivals see it change
00597   doors[i].BL_ready=true;
00598   doors[i].FCFS_in_use=false;
00599   // *** Leaving FCFS doorway *** //
00600   for(unsigned int j=0; j<num_doors; j++) {
00601     while(doors[j].FCFS_in_use || (doors[j].BL_ready && S[j]==doors[j].turn)) // wait for anyone in the doorway, or who entered before us (turn unchanged since our snapshot)
00602       if(block)
00603         spin();
00604       else {
00605         doors[i].BL_ready=false; // withdraw our bid before giving up
00606 #ifdef DEBUG_MUTEX_LOCK
00607         if(state==NULL || state->buttons[LFrPawOffset])
00608           std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
00609 #endif  
00610         return false;
00611       }
00612   }
00613   // *** Entering Burns-Lamport *** //
00614   do {
00615     doors[i].BL_in_use=true;
00616     for(unsigned int t=0; t<i; t++)
00617       if(doors[t].BL_in_use) { // lower-indexed contender -- back off and defer
00618         doors[i].BL_in_use=false;
00619         if(!block) {
00620           doors[i].BL_ready=false;
00621 #ifdef DEBUG_MUTEX_LOCK
00622           if(state==NULL || state->buttons[LFrPawOffset])
00623             std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
00624 #endif  
00625           return false;
00626         }
00627         while(doors[t].BL_in_use)
00628           spin();
00629         break;
00630       }
00631   } while(!doors[i].BL_in_use); // retry until our claim sticks with no lower-indexed contender
00632   for(unsigned int t=i+1; t<num_doors; t++)
00633     while(doors[t].BL_in_use) // wait out any higher-indexed contender still inside
00634       spin();
00635   // *** Leaving Burns-Lamport ***//
00636   // *** Lock has been given *** //
00637   owner_index=i;
00638 #ifdef DEBUG_MUTEX_LOCK
00639   if(state==NULL || state->buttons[LFrPawOffset])
00640     std::cerr << doors[i].id << " received lock " << this << " at " << get_time() << std::endl;
00641 #endif  
00642   return true;
00643 }
00644 
00645 
00646 template<unsigned int num_doors>
00647 unsigned int
00648 MutexLock<num_doors>::lookup(int id) {
00649   // TODO - this could break if two new processes are adding themselves at the same time
00650   //        or an id is being forgotten at the same time
00651   //I'm expecting a very small number of processes to be involved
00652   //probably not worth overhead of doing something fancy like a sorted array
00653   unsigned int i;
00654   for(i=0; i<doors_used; i++)
00655     if(doors[i].id==id)
00656       return i;
00657   if(i==num_doors)
00658     return NO_OWNER;
00659   doors[i].id=id;
00660   doors_used++;
00661   return i;
00662 }
00663 
00664 
00665 template<unsigned int num_doors>
00666 void
00667 MutexLock<num_doors>::forget(int id) { //not tested thoroughly (or at all?)
00668   unsigned int i = lookup(id);
00669   do_try_lock(i,true);
00670   doors[i].id=doors[--doors_used].id;
00671   doors[doors_used].id=NO_OWNER;
00672   releaseAll();
00673 }
00674 
00675 #endif //MUTEX_LOCK_ET_USE_SOFTWARE_ONLY
00676 
00677 /*! @file 
00678  * @brief Defines MutexLock, a software only mutual exclusion lock.
00679  * @author ejt (Creator), Edward A. Lycklama, Vassos Hadzilacos (paper from which this was based)
00680  *
00681  * $Author: ejt $
00682  * $Name: tekkotsu-4_0 $
00683  * $Revision: 1.20 $
00684  * $State: Exp $
00685  * $Date: 2007/11/10 22:58:08 $
00686  */
00687 
00688 #endif

Tekkotsu v4.0
Generated Thu Nov 22 00:54:54 2007 by Doxygen 1.5.4