@@ -385,16 +385,6 @@ struct compat_ifconf {
compat_caddr_t ifcbuf;
};
-struct compat_robust_list {
- compat_uptr_t next;
-};
-
-struct compat_robust_list_head {
- struct compat_robust_list list;
- compat_long_t futex_offset;
- compat_uptr_t list_op_pending;
-};
-
#ifdef CONFIG_COMPAT_OLD_SIGACTION
struct compat_old_sigaction {
compat_uptr_t sa_handler;
@@ -672,7 +662,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
struct compat_siginfo __user *, int,
struct compat_rusage __user *);
asmlinkage long
-compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+compat_sys_set_robust_list(struct robust_list_head32 __user *head,
compat_size_t len);
asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
@@ -56,6 +56,17 @@ union futex_key {
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
#ifdef CONFIG_FUTEX
+
+struct robust_list32 {
+ u32 next;
+};
+
+struct robust_list_head32 {
+ struct robust_list32 list;
+ s32 futex_offset;
+ u32 list_op_pending;
+};
+
enum {
FUTEX_STATE_OK,
FUTEX_STATE_EXITING,
@@ -1324,7 +1324,7 @@ struct task_struct {
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
- struct compat_robust_list_head __user *compat_robust_list;
+ struct robust_list_head32 __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
@@ -1144,13 +1144,14 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
return 0;
}
+#ifdef CONFIG_64BIT
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
* We silently return on any sign of list-walking problem.
*/
-static void exit_robust_list(struct task_struct *curr)
+static void exit_robust_list64(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
@@ -1211,8 +1212,13 @@ static void exit_robust_list(struct task_struct *curr)
curr, pip, HANDLE_DEATH_PENDING);
}
}
+#else
+static void exit_robust_list64(struct task_struct *curr)
+{
+ pr_warn("32bit kernel should not allow ROBUST_LIST_64BIT");
+}
+#endif
-#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
compat_long_t futex_offset)
{
@@ -1226,13 +1232,13 @@ static void __user *futex_uaddr(struct robust_list __user *entry,
* Fetch a robust-list pointer. Bit 0 signals PI futexes:
*/
static inline int
-compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
- compat_uptr_t __user *head, unsigned int *pi)
+fetch_robust_entry32(u32 *uentry, struct robust_list __user **entry,
+ u32 __user *head, unsigned int *pi)
{
if (get_user(*uentry, head))
return -EFAULT;
- *entry = compat_ptr((*uentry) & ~1);
+ *entry = (void __user *)(unsigned long)((*uentry) & ~1);
*pi = (unsigned int)(*uentry) & 1;
return 0;
@@ -1244,21 +1250,27 @@ compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **ent
*
* We silently return on any sign of list-walking problem.
*/
-static void compat_exit_robust_list(struct task_struct *curr)
+static void exit_robust_list32(struct task_struct *curr)
{
- struct compat_robust_list_head __user *head = curr->compat_robust_list;
+#ifdef CONFIG_COMPAT
+ struct robust_list_head32 __user *head = curr->compat_robust_list;
+#else
+ /* Without CONFIG_COMPAT, a 32bit kernel keeps its (32bit) list in ->robust_list. */
+ struct robust_list_head32 __user *head =
+ (struct robust_list_head32 __user *)curr->robust_list;
+#endif
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int next_pi;
- compat_uptr_t uentry, next_uentry, upending;
- compat_long_t futex_offset;
+ u32 uentry, next_uentry, upending;
+ s32 futex_offset;
int rc;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
- if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
+ if (fetch_robust_entry32((u32 *)&uentry, &entry, (u32 *)&head->list.next, &pi))
return;
/*
* Fetch the relative futex offset:
@@ -1269,7 +1275,7 @@ static void compat_exit_robust_list(struct task_struct *curr)
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
- if (compat_fetch_robust_entry(&upending, &pending,
+ if (fetch_robust_entry32(&upending, &pending,
&head->list_op_pending, &pip))
return;
@@ -1279,8 +1285,8 @@ static void compat_exit_robust_list(struct task_struct *curr)
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
- rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
- (compat_uptr_t __user *)&entry->next, &next_pi);
+ rc = fetch_robust_entry32(&next_uentry, &next_entry,
+ (u32 __user *)&entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* dont process it twice:
@@ -1311,7 +1317,6 @@ static void compat_exit_robust_list(struct task_struct *curr)
handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
}
}
-#endif
#ifdef CONFIG_FUTEX_PI
@@ -1406,14 +1411,21 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
static void futex_cleanup(struct task_struct *tsk)
{
+#ifdef CONFIG_64BIT
if (unlikely(tsk->robust_list)) {
- exit_robust_list(tsk);
+ exit_robust_list64(tsk);
tsk->robust_list = NULL;
}
+#else
+ if (unlikely(tsk->robust_list)) {
+ exit_robust_list32(tsk);
+ tsk->robust_list = NULL;
+ }
+#endif
#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
- compat_exit_robust_list(tsk);
+ exit_robust_list32(tsk);
tsk->compat_robust_list = NULL;
}
#endif
@@ -440,7 +440,7 @@ SYSCALL_DEFINE4(futex_requeue,
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(set_robust_list,
- struct compat_robust_list_head __user *, head,
+ struct robust_list_head32 __user *, head,
compat_size_t, len)
{
if (unlikely(len != sizeof(*head)))
@@ -455,7 +455,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
compat_uptr_t __user *, head_ptr,
compat_size_t __user *, len_ptr)
{
- struct compat_robust_list_head __user *head;
+ struct robust_list_head32 __user *head;
unsigned long ret;
struct task_struct *p;
There are two functions for handling robust lists during task exit:
exit_robust_list() and compat_exit_robust_list(). The first one handles
either 64-bit or 32-bit lists, depending on whether it's a 64-bit or a
32-bit kernel. compat_exit_robust_list() only exists on 64-bit kernels
that support 32-bit syscalls, and handles 32-bit lists. For the new
syscall set_robust_list2(), 64-bit kernels need to be able to handle
32-bit lists regardless of whether they support 32-bit syscalls, so
rename compat_exit_robust_list() to exit_robust_list32() and make it
exist regardless of CONFIG_COMPAT. Also, use explicitly sized types;
otherwise, on a 32-bit kernel, both functions would be exactly the
same, with neither of them dealing with 64-bit robust lists.

Signed-off-by: André Almeida <andrealmeid@igalia.com>
---
 include/linux/compat.h  | 12 +-----------
 include/linux/futex.h   | 11 +++++++++++
 include/linux/sched.h   |  2 +-
 kernel/futex/core.c     | 50 ++++++++++++++++++++++++++++++++++++++++----------------
 kernel/futex/syscalls.c |  4 ++--
 5 files changed, 49 insertions(+), 30 deletions(-)
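To illustrate the explicit-sizing point, here is a minimal userspace
sketch (not part of this patch; the robust_list_head64 mirror and its
field names are illustrative only, standing in for the kernel's native
struct robust_list_head): the two layouts differ in size and field
offsets, so a 64-bit kernel walking a 32-bit task's list must use the
32-bit layout, and pointer-based definitions on a 32-bit kernel would
collapse both walkers into the same 12-byte one.

#include <assert.h>
#include <stdint.h>

/* Userspace mirrors of the two list-head layouts. */
struct robust_list32 { uint32_t next; };
struct robust_list_head32 {
	struct robust_list32 list;
	int32_t futex_offset;
	uint32_t list_op_pending;
};

struct robust_list64 { uint64_t next; };
struct robust_list_head64 {
	struct robust_list64 list;
	int64_t futex_offset;
	uint64_t list_op_pending;
};

int main(void)
{
	/* 12 vs 24 bytes: the layouts are never interchangeable, which
	 * is why exit_robust_list32() and exit_robust_list64() have to
	 * be two different walkers. */
	static_assert(sizeof(struct robust_list_head32) == 12, "32bit head");
	static_assert(sizeof(struct robust_list_head64) == 24, "64bit head");
	return 0;
}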
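For completeness, a minimal sketch of the userspace side that these
exit paths consume, registering an empty robust list head with the
existing syscall (illustration only: real programs let glibc register a
per-thread head itself, and overriding it would break pthread robust
mutexes):

#define _GNU_SOURCE
#include <linux/futex.h>	/* struct robust_list_head */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* An empty list: the head's next pointer refers back to itself. */
static struct robust_list_head head = {
	.list = { &head.list },
	.futex_offset = 0,	/* offset from a list entry to its futex word */
	.list_op_pending = NULL,
};

int main(void)
{
	/* A 32-bit binary registers the layout that exit_robust_list32()
	 * walks at exit; a 64-bit binary, exit_robust_list64(). */
	if (syscall(SYS_set_robust_list, &head, sizeof(head)) != 0)
		perror("set_robust_list");
	return 0;
}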