@@ -35,7 +35,7 @@ pthread_mutex_timedlock (mutex, abstime)
   /* We must not check ABSTIME here.  If the thread does not block
      abstime must not be checked for a valid value.  */
 
-  switch (mutex->__data.__kind)
+  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
     {
       /* Recursive mutex.  */
     case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -65,7 +65,7 @@ pthread_mutex_timedlock (mutex, abstime)
       /* Error checking mutex.  */
     case PTHREAD_MUTEX_ERRORCHECK_NP:
       /* Check whether we already hold the mutex.  */
-      if (mutex->__data.__owner == id)
+      if (__builtin_expect (mutex->__data.__owner == id, 0))
         return EDEADLK;
 
       /* FALLTHROUGH */
@@ -134,7 +134,7 @@ pthread_mutex_timedlock (mutex, abstime)
           ENQUEUE_MUTEX (mutex);
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
-          /* Note that we deliberately exist here.  If we fall
+          /* Note that we deliberately exit here.  If we fall
              through to the end of the function __nusers would be
              incremented which is not correct because the old
              owner has to be discounted.  */
@@ -194,6 +194,150 @@ pthread_mutex_timedlock (mutex, abstime)
           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
 
+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+      {
+        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+        if (robust)
+          /* Note: robust PI futexes are signaled by setting bit 0.  */
+          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                   | 1));
+
+        oldval = mutex->__data.__lock;
+
+        /* Check whether we already hold the mutex.  */
+        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+          {
+            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+              {
+                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+                return EDEADLK;
+              }
+
+            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+              {
+                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+                /* Just bump the counter.  */
+                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                  /* Overflow of the counter.  */
+                  return EAGAIN;
+
+                ++mutex->__data.__count;
+
+                return 0;
+              }
+          }
+
+        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                      id, 0);
+
+        if (oldval != 0)
+          {
+            /* The mutex is locked.  The kernel will now take care of
+               everything.  The timeout value must be a relative value.
+               Convert it.  */
+            INTERNAL_SYSCALL_DECL (__err);
+
+            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                                      FUTEX_LOCK_PI, 1, abstime);
+            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
+              {
+                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
+                  return ETIMEDOUT;
+
+                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
+                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
+                  {
+                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+                    /* ESRCH can happen only for non-robust PI mutexes where
+                       the owner of the lock died.  */
+                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
+                            || !robust);
+
+                    /* Delay the thread until the timeout is reached.
+                       Then return ETIMEDOUT.  */
+                    struct timespec reltime;
+                    struct timespec now;
+
+                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
+                                      &now);
+                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
+                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
+                    if (reltime.tv_nsec < 0)
+                      {
+                        reltime.tv_nsec += 1000000000;
+                        --reltime.tv_sec;
+                      }
+                    if (reltime.tv_sec >= 0)
+                      while (__nanosleep_nocancel (&reltime, &reltime) != 0)
+                        continue;
+
+                    return ETIMEDOUT;
+                  }
+
+                return INTERNAL_SYSCALL_ERRNO (e, __err);
+              }
+
+            oldval = mutex->__data.__lock;
+
+            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+          }
+
+        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+          {
+            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+            /* We got the mutex.  */
+            mutex->__data.__count = 1;
+            /* But it is inconsistent unless marked otherwise.  */
+            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+            ENQUEUE_MUTEX_PI (mutex);
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+            /* Note that we deliberately exit here.  If we fall
+               through to the end of the function __nusers would be
+               incremented which is not correct because the old owner
+               has to be discounted.  */
+            return EOWNERDEAD;
+          }
+
+        if (robust
+            && __builtin_expect (mutex->__data.__owner
+                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+          {
+            /* This mutex is now not recoverable.  */
+            mutex->__data.__count = 0;
+
+            INTERNAL_SYSCALL_DECL (__err);
+            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                              FUTEX_UNLOCK_PI, 0, 0);
+
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+            return ENOTRECOVERABLE;
+          }
+
+        mutex->__data.__count = 1;
+        if (robust)
+          {
+            ENQUEUE_MUTEX_PI (mutex);
+            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+          }
+      }
+      break;
+
     default:
       /* Correct code cannot set any other type.  */
       return EINVAL;
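
For reference (not part of the patch): the new PTHREAD_MUTEX_PI_* cases are reached
when a mutex is created with the PTHREAD_PRIO_INHERIT protocol and then passed to
pthread_mutex_timedlock with an absolute CLOCK_REALTIME deadline.  A minimal caller
sketch using only standard POSIX calls, assuming a platform that defines
_POSIX_THREAD_PRIO_INHERIT (link with -lpthread; older glibc also needs -lrt for
clock_gettime):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    int
    main (void)
    {
      pthread_mutexattr_t attr;
      pthread_mutex_t mutex;
      struct timespec abstime;
      int err;

      /* Request priority inheritance; pthread_mutex_timedlock should then
         dispatch to the PTHREAD_MUTEX_PI_NORMAL_NP case added above.  */
      pthread_mutexattr_init (&attr);
      pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
      pthread_mutex_init (&mutex, &attr);

      /* The deadline is absolute and measured against CLOCK_REALTIME,
         matching the clock_gettime fallback used in the patch.  */
      clock_gettime (CLOCK_REALTIME, &abstime);
      abstime.tv_sec += 2;

      err = pthread_mutex_timedlock (&mutex, &abstime);
      if (err == ETIMEDOUT)
        puts ("timed out");
      else if (err == 0)
        pthread_mutex_unlock (&mutex);

      pthread_mutex_destroy (&mutex);
      pthread_mutexattr_destroy (&attr);
      return 0;
    }

An uncontended lock like this one returns 0 through the userspace fast path (the
atomic_compare_and_exchange_val_acq above); only a contended lock enters the
FUTEX_LOCK_PI system call.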