@@ -81,14 +81,7 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
un-wait-ed for again. */
pthread_cleanup_push (cleanup, &pd->joinid);
- int oldtype = CANCEL_ASYNC ();
-
- if (abstime != NULL)
- result = lll_timedwait_tid (pd->tid, abstime);
- else
- lll_wait_tid (pd->tid);
-
- CANCEL_RESET (oldtype);
+ result = lll_wait_tid (pd->tid, abstime);
pthread_cleanup_pop (0);
}
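
For readers unfamiliar with the joining path: the hunk above relies on the cleanup handler registered by pthread_cleanup_push running if the (now cancellable) wait is acted on by cancellation, so pd->joinid is cleared and the thread can be waited for again.  A minimal user-level analogue of that bracketing, with hypothetical names and a busy-wait standing in for the futex, is sketched below.

    #include <pthread.h>

    /* Hypothetical sketch: 'joinid' plays the role of pd->joinid, and the
       pthread_testcancel () loop stands in for the cancellable futex wait.  */
    static void clear_joinid (void *arg)
    {
      /* Runs only if the waiter is cancelled inside the loop below.  */
      *(int *) arg = 0;
    }

    static void join_wait_sketch (volatile int *tid, int *joinid)
    {
      pthread_cleanup_push (clear_joinid, joinid);
      while (*tid != 0)
        pthread_testcancel ();   /* cancellation point, like the futex wait */
      pthread_cleanup_pop (0);   /* 0: do not run the handler on normal exit */
    }
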
@@ -56,14 +56,8 @@ __old_sem_wait (sem_t *sem)
if (atomic_decrement_if_positive (futex) > 0)
return 0;
- /* Enable asynchronous cancellation. Required by the standard. */
- int oldtype = __pthread_enable_asynccancel ();
-
/* Always assume the semaphore is shared. */
- err = lll_futex_wait (futex, 0, LLL_SHARED);
-
- /* Disable asynchronous cancellation. */
- __pthread_disable_asynccancel (oldtype);
+ err = lll_futex_wait_cancel (futex, 0, LLL_SHARED);
}
while (err == 0 || err == -EWOULDBLOCK);
@@ -41,15 +41,15 @@
{ \
pthread_mutex_unlock (&__aio_requests_mutex); \
\
- int oldtype; \
- if (cancel) \
- oldtype = LIBC_CANCEL_ASYNC (); \
- \
int status; \
do \
{ \
- status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
- timeout, FUTEX_PRIVATE); \
+ if (cancel) \
+ status = futex_reltimed_wait_cancelable ( \
+ (unsigned int *) futexaddr, oldval, timeout, FUTEX_PRIVATE); \
+ else \
+ status = futex_reltimed_wait ((unsigned int *) futexaddr, \
+ oldval, timeout, FUTEX_PRIVATE); \
if (status != EAGAIN) \
break; \
\
@@ -57,9 +57,6 @@
} \
while (oldval != 0); \
\
- if (cancel) \
- LIBC_CANCEL_RESET (oldtype); \
- \
if (status == EINTR) \
result = EINTR; \
else if (status == ETIMEDOUT) \
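
For context on the loop above: futex_reltimed_wait and its _cancelable variant ultimately perform a FUTEX_WAIT that fails with EAGAIN when the futex word no longer holds the expected value, which is why the macro re-reads oldval and retries instead of treating EAGAIN as an error.  A rough user-space equivalent using the raw futex syscall (hypothetical helper name, simplified error handling) looks like this.

    #include <errno.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* Block until *addr != expected or RELTIME expires.  Returns 0 on wake-up,
       or EAGAIN / EINTR / ETIMEDOUT, mirroring the status values tested above.  */
    static int futex_reltimed_wait_sketch (unsigned int *addr, unsigned int expected,
                                           const struct timespec *reltime)
    {
      long ret = syscall (SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected,
                          reltime, NULL, 0);
      return ret == 0 ? 0 : errno;
    }
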
@@ -42,15 +42,15 @@
{ \
pthread_mutex_unlock (&__gai_requests_mutex); \
\
- int oldtype; \
- if (cancel) \
- oldtype = LIBC_CANCEL_ASYNC (); \
- \
int status; \
do \
{ \
- status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
- timeout, FUTEX_PRIVATE); \
+ if (cancel) \
+ status = futex_reltimed_wait_cancelable ( \
+ (unsigned int *) futexaddr, oldval, timeout, FUTEX_PRIVATE); \
+ else \
+ status = futex_reltimed_wait ((unsigned int *) futexaddr, \
+ oldval, timeout, FUTEX_PRIVATE); \
if (status != EAGAIN) \
break; \
\
@@ -58,9 +58,6 @@
} \
while (oldval != 0); \
\
- if (cancel) \
- LIBC_CANCEL_RESET (oldtype); \
- \
if (status == EINTR) \
result = EINTR; \
else if (status == ETIMEDOUT) \
@@ -175,33 +175,29 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *,
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
+extern int __lll_timedwait_tid (int *, const struct timespec *)
+ attribute_hidden;
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
- operations for futex wake-up when the clone terminates. */
-#define lll_wait_tid(tid) \
- do { \
- __typeof (tid) __tid; \
- /* We need acquire MO here so that we synchronize \
- with the kernel's store to 0 when the clone \
- terminates. (see above) */ \
- while ((__tid = atomic_load_acquire (&(tid))) != 0) \
- lll_futex_wait (&(tid), __tid, LLL_SHARED); \
- } while (0)
-
-extern int __lll_timedwait_tid (int *, const struct timespec *)
- attribute_hidden;
-
-/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
- ETIMEDOUT. If ABSTIME is invalid, return EINVAL. */
-#define lll_timedwait_tid(tid, abstime) \
- ({ \
- int __res = 0; \
- if ((tid) != 0) \
- __res = __lll_timedwait_tid (&(tid), (abstime)); \
- __res; \
+ operations for futex wake-up when the clone terminates.
+ If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
+ timeout occurs then return ETIMEDOUT, and if ABSTIME is invalid, return
+ EINVAL.  The futex operation is issued with the cancellable variant. */
+#define lll_wait_tid(tid, abstime) \
+ ({ \
+ int __res = 0; \
+ __typeof (tid) __tid; \
+ if (abstime != NULL) \
+ __res = __lll_timedwait_tid (&(tid), (abstime)); \
+ else \
+ /* We need acquire MO here so that we synchronize with the \
+ kernel's store to 0 when the clone terminates. (see above) */ \
+ while ((__tid = atomic_load_acquire (&(tid))) != 0) \
+ lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
+ __res; \
})
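
The __lll_timedwait_tid helper declared above is only prototyped in this patch; per the comment it waits for the TID word to be cleared, returning ETIMEDOUT once ABSTIME passes and EINVAL for a malformed ABSTIME.  A hedged sketch of that contract (not the glibc implementation, using the raw futex syscall for illustration) follows.

    #include <errno.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* Hypothetical sketch of the contract described above: wait for *tid to
       become zero, with ETIMEDOUT after ABSTIME and EINVAL for a bad ABSTIME.  */
    static int timedwait_tid_sketch (int *tid, const struct timespec *abstime)
    {
      if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
        return EINVAL;

      int val;
      /* Acquire load: synchronize with the kernel's store of 0 (see above).  */
      while ((val = __atomic_load_n (tid, __ATOMIC_ACQUIRE)) != 0)
        {
          struct timespec now, rel;
          clock_gettime (CLOCK_REALTIME, &now);
          rel.tv_sec = abstime->tv_sec - now.tv_sec;
          rel.tv_nsec = abstime->tv_nsec - now.tv_nsec;
          if (rel.tv_nsec < 0)
            {
              rel.tv_nsec += 1000000000;
              rel.tv_sec--;
            }
          if (rel.tv_sec < 0)
            return ETIMEDOUT;
          /* Non-private wait, matching the note about pre-3.16.3 kernels.  */
          if (syscall (SYS_futex, tid, FUTEX_WAIT, val, &rel, NULL, 0) != 0
              && errno == ETIMEDOUT)
            return ETIMEDOUT;
        }
      return 0;
    }
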
@@ -34,16 +34,8 @@ __libc_open64 (const char *file, int oflag, ...)
va_end (arg);
}
- if (SINGLE_THREAD_P)
- return __libc_open (file, oflag | O_LARGEFILE, mode);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = __libc_open (file, oflag | O_LARGEFILE, mode);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ /* __libc_open should be a cancellation point. */
+ return __libc_open (file, oflag | O_LARGEFILE, mode);
}
weak_alias (__libc_open64, __open64)
libc_hidden_weak (__open64)
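
The replacement comment captures the rationale used in this hunk and in the sigwait/waitid hunks below: once the inner call is itself a cancellation point (because its syscall is issued through a cancellable path), the thin wrapper needs no LIBC_CANCEL_ASYNC/LIBC_CANCEL_RESET bracketing of its own.  A user-level analogue with a hypothetical wrapper name, since read () is a POSIX cancellation point:

    #include <unistd.h>

    /* Cancellation raised while the thread is blocked in read () unwinds
       straight through this forwarding wrapper; no extra bracketing needed.  */
    static ssize_t read_wrapper (int fd, void *buf, size_t len)
    {
      return read (fd, buf, len);
    }
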
@@ -85,16 +85,8 @@ do_sigwait (const sigset_t *set, int *sig)
int
__sigwait (const sigset_t *set, int *sig)
{
- if (SINGLE_THREAD_P)
- return do_sigwait (set, sig);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_sigwait (set, sig);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ /* __sigsuspend should be a cancellation point. */
+ return do_sigwait (set, sig);
}
libc_hidden_def (__sigwait)
weak_alias (__sigwait, sigwait)
@@ -151,16 +151,8 @@ OUR_WAITID (idtype_t idtype, id_t id, siginfo_t *infop, int options)
int
__waitid (idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
- if (SINGLE_THREAD_P)
- return do_waitid (idtype, id, infop, options);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_waitid (idtype, id, infop, options);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ /* __waitpid should be a cancellation point. */
+ return do_waitid (idtype, id, infop, options);
}
weak_alias (__waitid, waitid)
strong_alias (__waitid, __libc_waitid)
@@ -28,27 +28,18 @@ int
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
struct timespec *rem)
{
- INTERNAL_SYSCALL_DECL (err);
- int r;
-
if (clock_id == CLOCK_THREAD_CPUTIME_ID)
return EINVAL;
if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
- if (SINGLE_THREAD_P)
- r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
- else
- {
- int oldstate = LIBC_CANCEL_ASYNC ();
-
- r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
- rem);
-
- LIBC_CANCEL_RESET (oldstate);
- }
-
+ /* If the call is interrupted by a signal handler or encounters an error,
+ it returns a positive value similar to errno. */
+ INTERNAL_SYSCALL_DECL (err);
+ int r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep, err, clock_id, flags,
+ req, rem);
return (INTERNAL_SYSCALL_ERROR_P (r, err)
- ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
+ ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
}
weak_alias (__clock_nanosleep, clock_nanosleep)
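
INTERNAL_SYSCALL_CANCEL is what absorbs the SINGLE_THREAD_P / LIBC_CANCEL_ASYNC logic deleted above: it is expected to enable asynchronous cancellation only around the kernel entry and restore the previous type afterwards, skipping the bookkeeping when the process is single-threaded.  A hedged, self-contained model of that window, expressed with the public pthread API rather than the internal macros:

    #include <pthread.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical model only: glibc's INTERNAL_SYSCALL_CANCEL uses internal
       macros (LIBC_CANCEL_ASYNC/RESET) plus a single-thread fast path; here the
       same windowing is expressed with pthread_setcanceltype.  */
    static long syscall_cancel_sketch (long nr, long a1, long a2, long a3, long a4)
    {
      int oldtype;
      pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
      long ret = syscall (nr, a1, a2, a3, a4);   /* cancellable window */
      pthread_setcanceltype (oldtype, NULL);
      return ret;
    }
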
@@ -138,9 +138,9 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,
const struct timespec *reltime, int private)
{
int oldtype;
- oldtype = __pthread_enable_asynccancel ();
+ oldtype = LIBC_CANCEL_ASYNC ();
int err = lll_futex_timed_wait (futex_word, expected, reltime, private);
- __pthread_disable_asynccancel (oldtype);
+ LIBC_CANCEL_RESET (oldtype);
switch (err)
{
case 0:
@@ -221,32 +221,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
+extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
+ __attribute__ ((regparm (2))) attribute_hidden;
+
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
- operations for futex wake-up when the clone terminates. */
-#define lll_wait_tid(tid) \
- do { \
- __typeof (tid) __tid; \
- while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid, LLL_SHARED);\
- } while (0)
-
-extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
- __attribute__ ((regparm (2))) attribute_hidden;
-
-/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
- ETIMEDOUT. If ABSTIME is invalid, return EINVAL.
- XXX Note that this differs from the generic version in that we do the
- error checking here and not in __lll_timedwait_tid. */
-#define lll_timedwait_tid(tid, abstime) \
- ({ \
- int __result = 0; \
- if ((tid) != 0) \
- __result = __lll_timedwait_tid (&(tid), (abstime)); \
- __result; })
-
+ operations for futex wake-up when the clone terminates.
+ If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
+ timeout occurs then return ETIMEDOUT, and if ABSTIME is invalid, return
+ EINVAL.  The futex operation is issued with the cancellable variant. */
+#define lll_wait_tid(tid, abstime) \
+ ({ \
+ int __res = 0; \
+ __typeof (tid) __tid; \
+ if (abstime != NULL) \
+ __res = __lll_timedwait_tid (&(tid), (abstime)); \
+ else \
+ /* We need acquire MO here so that we synchronize with the \
+ kernel's store to 0 when the clone terminates. (see above) */ \
+ while ((__tid = atomic_load_acquire (&(tid))) != 0) \
+ lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
+ __res; \
+ })
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
attribute_hidden;
@@ -125,6 +125,17 @@
private), \
nr_wake, nr_move, mutex, val)
+
+/* Cancellable futex macros. */
+#define lll_futex_wait_cancel(futexp, val, private) \
+ ({ \
+ int __oldtype = CANCEL_ASYNC (); \
+    long int __err = lll_futex_wait (futexp, val, private); \
+ CANCEL_RESET (__oldtype); \
+ __err; \
+ })
+
#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock-futex.h */
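
At a call site such as the __old_sem_wait hunk earlier in the patch, the statement expression above simply brackets a single futex wait with CANCEL_ASYNC/CANCEL_RESET and yields the wait's return value; a hand expansion of err = lll_futex_wait_cancel (futex, 0, LLL_SHARED) reads roughly as follows (illustration only, relying on the surrounding nptl definitions).

    err = ({
      int __oldtype = CANCEL_ASYNC ();                /* async cancellation on   */
      long int __err = lll_futex_wait (futex, 0, LLL_SHARED);
      CANCEL_RESET (__oldtype);                       /* previous type restored  */
      __err;                                          /* value of the expression */
    });
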
@@ -108,28 +108,29 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
- wakeup when the clone terminates. The memory location contains the
- thread ID while the clone is running and is reset to zero
- afterwards. */
-#define lll_wait_tid(tid) \
- do \
- { \
- __typeof (tid) __tid; \
- while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid, LLL_SHARED); \
- } \
- while (0)
-
extern int __lll_timedwait_tid (int *, const struct timespec *)
attribute_hidden;
-#define lll_timedwait_tid(tid, abstime) \
- ({ \
- int __res = 0; \
- if ((tid) != 0) \
- __res = __lll_timedwait_tid (&(tid), (abstime)); \
- __res; \
+/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
+ wake-up when the clone terminates. The memory location contains the
+ thread ID while the clone is running and is reset to zero by the kernel
+ afterwards. The kernel up to version 3.16.3 does not use the private futex
+ operations for futex wake-up when the clone terminates.
+ If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
+ timeout occurs then return ETIMEDOUT, and if ABSTIME is invalid, return
+ EINVAL.  The futex operation is issued with the cancellable variant. */
+#define lll_wait_tid(tid, abstime) \
+ ({ \
+ int __res = 0; \
+ __typeof (tid) __tid; \
+ if (abstime != NULL) \
+ __res = __lll_timedwait_tid (&(tid), (abstime)); \
+ else \
+ /* We need acquire MO here so that we synchronize with the \
+ kernel's store to 0 when the clone terminates. (see above) */ \
+ while ((__tid = atomic_load_acquire (&(tid))) != 0) \
+ lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
+ __res; \
})
#endif /* lowlevellock.h */
@@ -224,32 +224,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
+extern int __lll_timedwait_tid (int *, const struct timespec *)
+ attribute_hidden;
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
- operations for futex wake-up when the clone terminates. */
-#define lll_wait_tid(tid) \
- do { \
- __typeof (tid) __tid; \
- while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid, LLL_SHARED);\
- } while (0)
-
-extern int __lll_timedwait_tid (int *, const struct timespec *)
- attribute_hidden;
-
-/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
- ETIMEDOUT. If ABSTIME is invalid, return EINVAL.
- XXX Note that this differs from the generic version in that we do the
- error checking here and not in __lll_timedwait_tid. */
-#define lll_timedwait_tid(tid, abstime) \
- ({ \
- int __result = 0; \
- if ((tid) != 0) \
- __result = __lll_timedwait_tid (&(tid), (abstime)); \
- __result; })
+ operations for futex wake-up when the clone terminates.
+ If ABSTIME is not NULL, it is used as a timeout for the futex call.  If the
+ timeout occurs then return ETIMEDOUT, and if ABSTIME is invalid, return
+ EINVAL.  The futex operation is issued with the cancellable variant. */
+#define lll_wait_tid(tid, abstime) \
+ ({ \
+ int __res = 0; \
+ __typeof (tid) __tid; \
+ if (abstime != NULL) \
+ __res = __lll_timedwait_tid (&(tid), (abstime)); \
+ else \
+ /* We need acquire MO here so that we synchronize with the \
+ kernel's store to 0 when the clone terminates. (see above) */ \
+ while ((__tid = atomic_load_acquire (&(tid))) != 0) \
+ lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
+ __res; \
+ })
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
attribute_hidden;