MutexLock.h
#ifndef __MUTEX_LOCK_ET__
#define __MUTEX_LOCK_ET__

#include "Shared/Resource.h"
#include "ProcessID.h"
#if !defined(PLATFORM_APERIOS)
# include "IPC/RCRegion.h"
#endif
#include <iostream>
#include <exception>
#include <typeinfo>

class SemaphoreManager;

//! Base class for the MutexLock implementations below: holds the "no owner" sentinel,
//! the usleep granularity used while spinning on a lock, and (when shared memory is in
//! use) the hooks for installing the global SemaphoreManager.
class MutexLockBase : public Resource {
public:
  virtual ~MutexLockBase() {} //!< destructor

  static const unsigned int NO_OWNER=-1U;  //!< marks that no process currently holds the lock
  static unsigned int usleep_granularity;  //!< granularity of usleep(), used to scale the delay when spinning on a lock

#if !defined(PLATFORM_APERIOS) && !defined(MUTEX_LOCK_ET_USE_SOFTWARE_ONLY)
# if !defined(TEKKOTSU_SHM_STYLE) || TEKKOTSU_SHM_STYLE==NO_SHM
  static void aboutToFork() {} //!< no-op in the no-shared-memory configuration
# else

  //! Exception thrown when the SemaphoreManager cannot provide any more semaphores
  class no_more_semaphores : public std::exception {
  public:
    //! constructor
    no_more_semaphores() throw() : std::exception() {}
    //! returns a constant description string
    virtual const char* what() const throw() { return "SemaphoreManager::getSemaphore() returned invalid()"; }
  };

  //! sets the SemaphoreManager which will hand out semaphores for these locks
  static void setSemaphoreManager(SemaphoreManager* mgr);

  //! returns the current SemaphoreManager (may be NULL if none has been installed)
  static SemaphoreManager* getSemaphoreManager() {
    return semgr;
  }

  //! called before the process forks
  static void aboutToFork();

protected:
  //! the SemaphoreManager which supplies the semaphores used by the locks
  static SemaphoreManager* semgr;

  //! a statically allocated SemaphoreManager instance
  static SemaphoreManager preallocated;
# endif
#endif
};
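// Illustrative sketch of how the hooks above fit together (not part of the original
// header; the actual initialization sequence lives elsewhere in the framework).  In
// configurations where the semaphore-backed locks are compiled in, a process-level
// setup step would install a SemaphoreManager before any such lock is constructed,
// and call aboutToFork() before forking:
//
//   SemaphoreManager mgr;                     // hypothetical manager instance
//   MutexLockBase::setSemaphoreManager(&mgr); // make it available to all locks
//   // ...
//   MutexLockBase::aboutToFork();             // notify the locks before a fork()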

#if !defined(PLATFORM_APERIOS) && !defined(MUTEX_LOCK_ET_USE_SOFTWARE_ONLY)
# if !defined(TEKKOTSU_SHM_STYLE) || TEKKOTSU_SHM_STYLE==NO_SHM
#  include "Thread.h"

//! Mutual-exclusion lock used when no inter-process shared memory is in play
//! (TEKKOTSU_SHM_STYLE is NO_SHM): it wraps a recursive Thread::Lock and records the
//! id of the current holder.  The num_doors template parameter is unused by this
//! implementation and is kept only for interface compatibility.
template<unsigned int num_doors>
class MutexLock : public MutexLockBase {
public:
  MutexLock() : owner_index(NO_OWNER), thdLock() {}

  ~MutexLock() {
    if(owner_index!=NO_OWNER) {
      owner_index=NO_OWNER;
      while(thdLock.getLockLevel()>0)
        thdLock.unlock();
    }
  }

  void lock(int id) {
    thdLock.lock();
    if(owner_index!=static_cast<unsigned>(id))
      owner_index=id;
  }

  bool try_lock(int id) {
    if(!thdLock.trylock())
      return false;
    owner_index=id;
    return true;
  }

  inline void unlock() {
    if(thdLock.getLockLevel()<=0)
      std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
    if(thdLock.getLockLevel()<=1)
      owner_index=NO_OWNER;
    thdLock.unlock();
  }

  void releaseAll() {
    owner_index=NO_OWNER;
    while(thdLock.getLockLevel()>0)
      thdLock.unlock();
  }

  unsigned int get_lock_level() const { return thdLock.getLockLevel(); }

  inline int owner() const { return owner_index; }

protected:
  friend class MarkScope;
  virtual void useResource(Resource::Data&) { lock(ProcessID::getID()); }
  virtual void releaseResource(Resource::Data&) { unlock(); }

  unsigned int owner_index;
  Thread::Lock thdLock;
};
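// Illustrative usage sketch (not part of the original header): this variant behaves as
// a recursive lock keyed by the caller-supplied id, typically ProcessID::getID().
//
//   static MutexLock<2> resourceLock;       // hypothetical lock instance
//   resourceLock.lock(ProcessID::getID());  // blocks until the lock is held
//   // ... critical section ...
//   resourceLock.unlock();                  // must balance each lock()
//
// Because the class is a Resource, a MarkScope can be used to claim it for the
// duration of a scope instead of pairing lock()/unlock() calls by hand.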

# else
#  include "SemaphoreManager.h"
#  include <unistd.h>
#  include <pthread.h>

//! Mutual-exclusion lock shared between processes, built on a semaphore obtained from
//! the global SemaphoreManager.  The semaphore's value tracks the recursive lock depth,
//! while owner_index and owner_thread record which process and thread currently hold
//! the lock.  If the SemaphoreManager is unavailable, operations fall back to issuing
//! warnings and assuming success.
template<unsigned int num_doors>
class MutexLock : public MutexLockBase {
public:
  MutexLock() : MutexLockBase(),
    sem(semgr->getSemaphore()), owner_index(NO_OWNER), owner_thread()
  {
    if(sem==semgr->invalid())
      throw no_more_semaphores();
    semgr->setValue(sem,0);
  }

  MutexLock(SemaphoreManager::semid_t semid) : MutexLockBase(),
    sem(semid), owner_index(NO_OWNER), owner_thread()
  {
    if(sem==semgr->invalid())
      throw no_more_semaphores();
    semgr->setValue(sem,0);
  }

  ~MutexLock() {
    if(owner_index!=NO_OWNER) {
      owner_index=NO_OWNER;
      owner_thread=pthread_self();
      if(semgr!=NULL && !semgr->hadFault()) {
        unsigned int depth=semgr->getValue(sem);
        semgr->setValue(sem,0);
        while(depth-->0)
          Thread::popNoCancel();
      }
    }
    if(semgr!=NULL && !semgr->hadFault())
      semgr->releaseSemaphore(sem);
    else
      std::cerr << "Warning: MutexLock leaked semaphore " << sem << " because SemaphoreManager is NULL" << std::endl;
  }

  void lock(int id) {
    Thread::pushNoCancel();
    doLock(id);
  }

  bool try_lock(int id) {
    Thread::pushNoCancel();
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming try_lock success of " << sem << " because SemaphoreManager is NULL" << std::endl;
      owner_index=id;
      return true;
    }
    semgr->raise(sem,1);
    if(owner()==id && isOwnerThread()) {
      return true;
    } else {
      if(semgr->getValue(sem)==1) {
        owner_index=id;
        owner_thread=pthread_self();
        return true;
      } else {
        if(!semgr->lower(sem,1,false))
          std::cerr << "Warning: MutexLock::trylock failure caused strange underflow" << std::endl;
        Thread::popNoCancel();
        return false;
      }
    }
  }

  inline void unlock() {
    releaseResource(emptyData);
    Thread::popNoCancel();
  }

  void releaseAll() {
    owner_index=NO_OWNER;
    owner_thread=pthread_self();
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming releaseAll of " << sem << " because SemaphoreManager is NULL" << std::endl;
      return;
    }
    unsigned int depth=semgr->getValue(sem);
    semgr->setValue(sem,0);
    while(depth-->0)
      Thread::popNoCancel();
  }

  unsigned int get_lock_level() const {
    if(semgr==NULL || semgr->hadFault())
      return (owner_index==NO_OWNER) ? 0 : 1;
    else
      return semgr->getValue(sem);
  }

  inline int owner() const { return owner_index; }

protected:
  bool isOwnerThread() {
    pthread_t cur=pthread_self();
    return pthread_equal(cur,owner_thread);
  }
  friend class MarkScope;

  void doLock(int id) {
    if(owner_index!=static_cast<unsigned>(id) || !isOwnerThread()) {
      if(semgr!=NULL && !semgr->hadFault()) {
        semgr->testZero_add(sem,1);
#ifdef DEBUG
        if(owner_index!=NO_OWNER || !pthread_equal(pthread_t(),owner_thread))
          std::cerr << "Owner is not clear: " << owner_index << ' ' << owner_thread << std::endl;
#endif
      } else
        std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
      owner_index=id;
      owner_thread=pthread_self();
    } else {
      if(semgr!=NULL && !semgr->hadFault())
        semgr->raise(sem,1);
      else
        std::cerr << "Warning: MutexLock assuming lock of " << sem << " because SemaphoreManager is NULL" << std::endl;
    }
  }
  virtual void useResource(Resource::Data&) {
    doLock(ProcessID::getID());
  }
  virtual void releaseResource(Resource::Data&) {
    if(semgr==NULL || semgr->hadFault()) {
      std::cerr << "Warning: MutexLock assuming unlock of " << sem << " from " << owner_index << " because SemaphoreManager is NULL" << std::endl;
      owner_index=NO_OWNER;
      owner_thread=pthread_t();
      return;
    }
    if(owner_index==NO_OWNER || !isOwnerThread()) {
      std::cerr << "Warning: MutexLock::unlock called by thread that didn't own the lock " << owner_index << ' ' << owner_thread << ' ' << pthread_self() << ' ' << semgr->getValue(sem) << std::endl;
      return;
    }
    int depth = semgr->getValue(sem);
    if(depth==1) {
      owner_index=NO_OWNER;
      owner_thread=pthread_t();
    } else if(depth<=0) {
      std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
      owner_index=NO_OWNER;
      owner_thread=pthread_t();
      return;
    }
    if(!semgr->lower(sem,1,false))
      std::cerr << "Warning: MutexLock::unlock caused strange underflow" << std::endl;
  }

  SemaphoreManager::semid_t sem;
  unsigned int owner_index;
  pthread_t owner_thread;

private:
  MutexLock(const MutexLock& ml);
  MutexLock& operator=(const MutexLock& ml);
};
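// Illustrative usage sketch (not part of the original header): construction requires a
// working SemaphoreManager and can throw no_more_semaphores if none are left, so a
// non-blocking acquisition might look like:
//
//   try {
//     MutexLock<2> ipcLock;                    // hypothetical lock instance
//     if(ipcLock.try_lock(ProcessID::getID())) {
//       // ... critical section ...
//       ipcLock.unlock();
//     }
//   } catch(const MutexLockBase::no_more_semaphores&) {
//     // the SemaphoreManager had no free semaphores
//   }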

# endif
#else // SOFTWARE ONLY mutual exclusion, used on Aperios, or if MUTEX_LOCK_ET_USE_SOFTWARE_ONLY is defined

#ifdef DEBUG_MUTEX_LOCK
# include "Shared/WorldState.h"
#endif

//! Software-only mutual-exclusion lock supporting up to num_doors different processes,
//! used on Aperios or when MUTEX_LOCK_ET_USE_SOFTWARE_ONLY is defined.  Each process id
//! is assigned a "door" via lookup(); acquisition runs a first-come-first-served
//! doorway phase followed by a priority arbitration phase, spinning (with a usleep on
//! non-Aperios platforms) until the lock is obtained.  The lock is recursive: lockcount
//! tracks the nesting depth for the current owner.
template<unsigned int num_doors>
class MutexLock : public MutexLockBase {
public:
  MutexLock() : doors_used(0), owner_index(NO_OWNER), lockcount(0) { init(); }

#ifndef PLATFORM_APERIOS
  ~MutexLock() {
    if(owner_index!=NO_OWNER) {
      owner_index=NO_OWNER;
      thdLock[sem].unlock();
    }
  }
#endif

  void lock(int id);

  bool try_lock(int id);

  inline void unlock();

  void releaseAll() { lockcount=1; unlock(); }

  unsigned int get_lock_level() const { return lockcount; }

  inline int owner() const { return owner_index==NO_OWNER ? NO_OWNER : doors[owner_index].id; }

  void forget(int id);
#ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
  inline unsigned int getSpincount() { return spincount; } //!< returns the number of calls to spin() since the last reset
  inline void resetSpincount() { spincount=0; }            //!< resets the spin counter
#endif

protected:
  friend class MarkScope;
  virtual void useResource(Resource::Data&) {
    lock(ProcessID::getID());
  }
  virtual void releaseResource(Resource::Data&) {
    unlock();
  }

  bool do_try_lock(unsigned int index, bool block);

  unsigned int lookup(int id);

#ifdef MUTEX_LOCK_ET_USE_SPINCOUNT
  volatile unsigned int spincount;
  void init() { spincount=0; }
  inline void spin() {
    spincount++;
#ifndef PLATFORM_APERIOS
    usleep(usleep_granularity*10);
#endif
  }
#else
  void init() { }
  inline void spin() {
#ifndef PLATFORM_APERIOS
    usleep(usleep_granularity*10);
#endif
  }
#endif

  struct door_t {
    door_t() : id(NO_OWNER), FCFS_in_use(false), BL_ready(false), BL_in_use(false), turn('\0'), next_turn_bit('\0') {}

    int id;
    volatile bool FCFS_in_use;
    volatile bool BL_ready;
    volatile bool BL_in_use;
    volatile unsigned char turn;
    unsigned char next_turn_bit;
  };

  door_t doors[num_doors];
  unsigned int doors_used;
  unsigned int owner_index;
  unsigned int lockcount;
};
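// Illustrative note (not part of the original header): each distinct id passed to
// lock()/try_lock() consumes one of the num_doors entries until forget(id) is called,
// so num_doors bounds how many different processes may ever contend for the lock.
// Assuming a process-count constant such as ProcessID::NumProcesses is available:
//
//   static MutexLock<ProcessID::NumProcesses> bigLock;  // hypothetical instance
//   bigLock.lock(ProcessID::getID());
//   // ... critical section ...
//   bigLock.unlock();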

template<unsigned int num_doors>
void
MutexLock<num_doors>::lock(int id) {
#ifndef PLATFORM_APERIOS
  thdLock[sem].lock();
#endif
  if(owner()!=id) {
    if(!do_try_lock(lookup(id),true)) {
      std::cout << "Warning: lock() failed to achieve lock" << std::endl;
    }
  } else {
#ifdef DEBUG_MUTEX_LOCK
    if(state==NULL || state->buttons[LFrPawOffset])
      std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
#endif
  }
  lockcount++;
}

template<unsigned int num_doors>
bool
MutexLock<num_doors>::try_lock(int id) {
#ifndef PLATFORM_APERIOS
  if(!thdLock[sem].trylock())
    return false;
#endif
  if(owner()==id) {
#ifdef DEBUG_MUTEX_LOCK
    if(state==NULL || state->buttons[LFrPawOffset])
      std::cerr << id << " re-locked " << this << " level " << lockcount+1 << std::endl;
#endif
    lockcount++;
    return true;
  } else {
    if(do_try_lock(lookup(id),false)) {
      lockcount++;
      return true;
    } else {
#ifndef PLATFORM_APERIOS
      thdLock[sem].unlock();
#endif
      return false;
    }
  }
}

template<unsigned int num_doors>
void
MutexLock<num_doors>::unlock() {
  if(lockcount==0) {
    std::cerr << "Warning: MutexLock::unlock caused underflow" << std::endl;
    return;
  }
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[owner_index].id << " unlock " << this << " level " << lockcount << std::endl;
#endif
  if(--lockcount==0) {
    if(owner_index!=NO_OWNER) {
      unsigned int tmp = owner_index;
      owner_index=NO_OWNER;
      doors[tmp].BL_in_use=false;
      doors[tmp].BL_ready=false;
#ifndef PLATFORM_APERIOS
      // release the thread-level lock acquired in lock()
      thdLock[sem].unlock();
#endif
    }
  }
}

template<unsigned int num_doors>
bool
MutexLock<num_doors>::do_try_lock(unsigned int i, bool block) {
  if(i==NO_OWNER) {
    std::cerr << "WARNING: new process attempted to lock beyond num_doors (" << num_doors << ")" << std::endl;
    return false;
  }
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[i].id << " attempting lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif
  unsigned char S[num_doors];

  doors[i].FCFS_in_use=true;
  for(unsigned int j=0; j<num_doors; j++)
    S[j]=doors[j].turn;
  doors[i].next_turn_bit=1-doors[i].next_turn_bit;
  doors[i].turn^=(1<<doors[i].next_turn_bit);
  doors[i].BL_ready=true;
  doors[i].FCFS_in_use=false;

  for(unsigned int j=0; j<num_doors; j++) {
    while(doors[j].FCFS_in_use || (doors[j].BL_ready && S[j]==doors[j].turn))
      if(block)
        spin();
      else {
        doors[i].BL_ready=false;
#ifdef DEBUG_MUTEX_LOCK
        if(state==NULL || state->buttons[LFrPawOffset])
          std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif
        return false;
      }
  }

  do {
    doors[i].BL_in_use=true;
    for(unsigned int t=0; t<i; t++)
      if(doors[t].BL_in_use) {
        doors[i].BL_in_use=false;
        if(!block) {
          doors[i].BL_ready=false;
#ifdef DEBUG_MUTEX_LOCK
          if(state==NULL || state->buttons[LFrPawOffset])
            std::cerr << doors[i].id << " giving up on lock " << this << " held by " << owner_index << " at " << get_time() << std::endl;
#endif
          return false;
        }
        while(doors[t].BL_in_use)
          spin();
        break;
      }
  } while(!doors[i].BL_in_use);
  for(unsigned int t=i+1; t<num_doors; t++)
    while(doors[t].BL_in_use)
      spin();

  owner_index=i;
#ifdef DEBUG_MUTEX_LOCK
  if(state==NULL || state->buttons[LFrPawOffset])
    std::cerr << doors[i].id << " received lock " << this << " at " << get_time() << std::endl;
#endif
  return true;
}
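// A brief reading of do_try_lock() (orientation comment, not part of the original
// header): the first loop is a first-come-first-served doorway -- each door snapshots
// every other door's turn value, flips its own, and then waits, for each other door,
// while that door is still inside the doorway or is still ready with the turn value
// captured in the snapshot.  The do/while and the loop that follows arbitrate among
// doors that passed the doorway together: a door defers to any lower-indexed door that
// has already set BL_in_use, then waits for higher-indexed doors to clear theirs before
// taking ownership.  In the non-blocking case each wait point instead clears BL_ready
// and returns false.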

template<unsigned int num_doors>
unsigned int
MutexLock<num_doors>::lookup(int id) {
  unsigned int i;
  for(i=0; i<doors_used; i++)
    if(doors[i].id==id)
      return i;
  if(i==num_doors)
    return NO_OWNER;
  doors[i].id=id;
  doors_used++;
  return i;
}

template<unsigned int num_doors>
void
MutexLock<num_doors>::forget(int id) {
  unsigned int i = lookup(id);
  do_try_lock(i,true);
  doors[i].id=doors[--doors_used].id;
  doors[doors_used].id=NO_OWNER;
  releaseAll();
}

#endif // MUTEX_LOCK_ET_USE_SOFTWARE_ONLY

#endif