Add the ability to downgrade a write variant of the cglocks.
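
For context, the new cg_dwlock() helper lets a caller that already holds the full write variant (taken with cg_wlock()) drop down to a plain read lock without ever fully releasing the lock, so other readers can proceed while the caller finishes its read-only work. The first hunk splits out rd_unlock_noyield()/wr_unlock_noyield(), apparently so the downgrade path avoids sched_yield() while the cglock's mutex is still held, while the ordinary rd_unlock()/wr_unlock() keep their yielding behaviour. Below is a minimal usage sketch, not part of the patch: update_pool_stats(), pool_stats_lock and accepted_shares are hypothetical names used only to illustrate the pattern, assuming the cglock helpers declared in miner.h.

#include <stdio.h>
#include "miner.h"	/* cglock_t, cg_wlock, cg_dwlock, cg_runlock */

/* Hypothetical shared state guarded by a cglock; cglock_init(&pool_stats_lock)
 * is assumed to have been called during setup. */
static cglock_t pool_stats_lock;
static int accepted_shares;

static void update_pool_stats(void)
{
	int snapshot;

	cg_wlock(&pool_stats_lock);	/* full write variant: excludes readers and writers */
	accepted_shares++;		/* mutate shared state under the write lock */
	cg_dwlock(&pool_stats_lock);	/* downgrade: readers may now enter, we keep a read lock */
	snapshot = accepted_shares;	/* read-only work continues safely */
	cg_runlock(&pool_stats_lock);	/* release the read lock */

	printf("accepted shares: %d\n", snapshot);
}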
diff --git a/miner.h b/miner.h
index 61a082b..0ac23f8 100644
--- a/miner.h
+++ b/miner.h
@@ -754,17 +754,28 @@ static inline void rw_unlock(pthread_rwlock_t *lock)
{
if (unlikely(pthread_rwlock_unlock(lock)))
quit(1, "WTF RWLOCK ERROR ON UNLOCK!");
- sched_yield();
+}
+
+static inline void rd_unlock_noyield(pthread_rwlock_t *lock)
+{
+ rw_unlock(lock);
+}
+
+static inline void wr_unlock_noyield(pthread_rwlock_t *lock)
+{
+ rw_unlock(lock);
}

static inline void rd_unlock(pthread_rwlock_t *lock)
{
rw_unlock(lock);
+ sched_yield();
}

static inline void wr_unlock(pthread_rwlock_t *lock)
{
rw_unlock(lock);
+ sched_yield();
}

static inline void mutex_init(pthread_mutex_t *lock)
@@ -820,6 +831,14 @@ static inline void cg_wlock(cglock_t *lock)
wr_lock(&lock->rwlock);
}

+/* Downgrade write variant to a read lock */
+static inline void cg_dwlock(cglock_t *lock)
+{
+ wr_unlock_noyield(&lock->rwlock);
+ rd_lock(&lock->rwlock);
+ mutex_unlock_noyield(&lock->mutex);
+}
+
/* Downgrade intermediate variant to a read lock */
static inline void cg_dlock(cglock_t *lock)
{