
[v4,bpf-next,2/6] bpf: prevent deadlock from recursive bpf_task_storage_[get|delete]

Message ID 20210223012014.2087583-3-songliubraving@fb.com
State Superseded
Series bpf: enable task local storage for tracing programs

Commit Message

Song Liu Feb. 23, 2021, 1:20 a.m. UTC
BPF helpers bpf_task_storage_[get|delete] could hold two locks:
bpf_local_storage_map_bucket->lock and bpf_local_storage->lock. Calling
these helpers from fentry/fexit programs on functions in bpf_*_storage.c
may cause a deadlock on either lock.

Prevent such deadlocks with a per-cpu counter, bpf_task_storage_busy, which
is similar to bpf_prog_active. We need this counter to be global, because
the two locks here belong to two different objects: bpf_local_storage_map
and bpf_local_storage. If we pick one of them as the owner of the counter,
it is still possible to trigger a deadlock on the other lock. For example,
if bpf_local_storage_map owns the counter, it cannot prevent a deadlock
on bpf_local_storage->lock when two maps are used.
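
To make the recursion concrete, here is a hypothetical fentry program
(task_map and on_update are made-up names, and the usual vmlinux.h /
bpf_helpers.h / bpf_tracing.h includes are omitted) that would re-enter
the helper from inside a traced storage function:

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} task_map SEC(".maps");

SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
{
	struct task_struct *task = bpf_get_current_task_btf();

	/* The traced function may already hold
	 * bpf_local_storage_map_bucket->lock or bpf_local_storage->lock.
	 * Without the busy counter this call could deadlock; with it,
	 * the trylock fails and the helper returns NULL instead.
	 */
	bpf_task_storage_get(&task_map, task, 0,
			     BPF_LOCAL_STORAGE_GET_F_CREATE);
	return 0;
}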

Signed-off-by: Song Liu <songliubraving@fb.com>
---
 kernel/bpf/bpf_task_storage.c | 57 ++++++++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 7 deletions(-)

Comments

Song Liu Feb. 23, 2021, 7:16 a.m. UTC | #1
> On Feb 22, 2021, at 10:21 PM, Andrii Nakryiko <andrii.nakryiko@gmail.com> wrote:
> 
> On Mon, Feb 22, 2021 at 5:23 PM Song Liu <songliubraving@fb.com> wrote:
>> 
>> BPF helpers bpf_task_storage_[get|delete] could hold two locks:
>> bpf_local_storage_map_bucket->lock and bpf_local_storage->lock. Calling
>> these helpers from fentry/fexit programs on functions in bpf_*_storage.c
>> may cause a deadlock on either lock.
>> 
>> Prevent such deadlocks with a per-cpu counter, bpf_task_storage_busy, which
>> is similar to bpf_prog_active. We need this counter to be global, because
>> the two locks here belong to two different objects: bpf_local_storage_map
>> and bpf_local_storage. If we pick one of them as the owner of the counter,
>> it is still possible to trigger a deadlock on the other lock. For example,
>> if bpf_local_storage_map owns the counter, it cannot prevent a deadlock
>> on bpf_local_storage->lock when two maps are used.
>> 
>> Signed-off-by: Song Liu <songliubraving@fb.com>
>> ---
>> kernel/bpf/bpf_task_storage.c | 57 ++++++++++++++++++++++++++++++-----
>> 1 file changed, 50 insertions(+), 7 deletions(-)
>> 
> 
> [...]
> 
>> @@ -109,7 +136,9 @@ static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
>>                goto out;
>>        }
>> 
>> +       bpf_task_storage_lock();
>>        sdata = task_storage_lookup(task, map, true);
>> +       bpf_task_storage_unlock();
>>        put_pid(pid);
>>        return sdata ? sdata->data : NULL;
>> out:
>> @@ -141,8 +170,10 @@ static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
>>                goto out;
>>        }
>> 
>> +       bpf_task_storage_lock();
>>        sdata = bpf_local_storage_update(
>>                task, (struct bpf_local_storage_map *)map, value, map_flags);
> 
> this should probably be container_of() instead of casting

bpf_task_storage.c uses casting in multiple places. How about we fix it in a 
separate patch?

Thanks,
Song

> 
>> +       bpf_task_storage_unlock();
>> 
>>        err = PTR_ERR_OR_ZERO(sdata);
>> out:
>> @@ -185,7 +216,9 @@ static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
>>                goto out;
>>        }
>> 
>> +       bpf_task_storage_lock();
>>        err = task_storage_delete(task, map);
>> +       bpf_task_storage_unlock();
>> out:
>>        put_pid(pid);
>>        return err;
> 
> [...]
Andrii Nakryiko Feb. 23, 2021, 7:19 a.m. UTC | #2
On Mon, Feb 22, 2021 at 11:16 PM Song Liu <songliubraving@fb.com> wrote:
>
>
>
> > On Feb 22, 2021, at 10:21 PM, Andrii Nakryiko <andrii.nakryiko@gmail.com> wrote:
> >
> > On Mon, Feb 22, 2021 at 5:23 PM Song Liu <songliubraving@fb.com> wrote:
> >>
> >> BPF helpers bpf_task_storage_[get|delete] could hold two locks:
> >> bpf_local_storage_map_bucket->lock and bpf_local_storage->lock. Calling
> >> these helpers from fentry/fexit programs on functions in bpf_*_storage.c
> >> may cause a deadlock on either lock.
> >>
> >> Prevent such deadlocks with a per-cpu counter, bpf_task_storage_busy, which
> >> is similar to bpf_prog_active. We need this counter to be global, because
> >> the two locks here belong to two different objects: bpf_local_storage_map
> >> and bpf_local_storage. If we pick one of them as the owner of the counter,
> >> it is still possible to trigger a deadlock on the other lock. For example,
> >> if bpf_local_storage_map owns the counter, it cannot prevent a deadlock
> >> on bpf_local_storage->lock when two maps are used.
> >>
> >> Signed-off-by: Song Liu <songliubraving@fb.com>
> >> ---
> >> kernel/bpf/bpf_task_storage.c | 57 ++++++++++++++++++++++++++++++-----
> >> 1 file changed, 50 insertions(+), 7 deletions(-)
> >>
> >
> > [...]
> >
> >> @@ -109,7 +136,9 @@ static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
> >>                goto out;
> >>        }
> >>
> >> +       bpf_task_storage_lock();
> >>        sdata = task_storage_lookup(task, map, true);
> >> +       bpf_task_storage_unlock();
> >>        put_pid(pid);
> >>        return sdata ? sdata->data : NULL;
> >> out:
> >> @@ -141,8 +170,10 @@ static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
> >>                goto out;
> >>        }
> >>
> >> +       bpf_task_storage_lock();
> >>        sdata = bpf_local_storage_update(
> >>                task, (struct bpf_local_storage_map *)map, value, map_flags);
> >
> > this should probably be container_of() instead of casting
>
> bpf_task_storage.c uses casting in multiple places. How about we fix it in a
> separate patch?
>

Sure, let's fix all uses in a separate patch. My point is that the cast
makes an assumption (that the struct bpf_map map field is always the
very first member) which is neither enforced nor documented in struct
bpf_local_storage_map.
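
For illustration, a minimal sketch of the container_of() alternative;
map_to_smap() is a hypothetical helper name, not something this patch
defines:

static struct bpf_local_storage_map *map_to_smap(struct bpf_map *map)
{
	/* Recovers the enclosing bpf_local_storage_map from its embedded
	 * struct bpf_map member without assuming the member's offset.
	 */
	return container_of(map, struct bpf_local_storage_map, map);
}

	/* then, e.g. in bpf_pid_task_storage_update_elem(): */
	sdata = bpf_local_storage_update(task, map_to_smap(map), value,
					 map_flags);

Unlike the cast, this stays correct even if the map member is ever moved
out of the first position in struct bpf_local_storage_map.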

> Thanks,
> Song
>
> >
> >> +       bpf_task_storage_unlock();
> >>
> >>        err = PTR_ERR_OR_ZERO(sdata);
> >> out:
> >> @@ -185,7 +216,9 @@ static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
> >>                goto out;
> >>        }
> >>
> >> +       bpf_task_storage_lock();
> >>        err = task_storage_delete(task, map);
> >> +       bpf_task_storage_unlock();
> >> out:
> >>        put_pid(pid);
> >>        return err;
> >
> > [...]
>

Patch

diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index 2034019966d44..ed7d2e02b1c19 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -20,6 +20,31 @@ 
 
 DEFINE_BPF_STORAGE_CACHE(task_cache);
 
+DEFINE_PER_CPU(int, bpf_task_storage_busy);
+
+static void bpf_task_storage_lock(void)
+{
+	migrate_disable();
+	__this_cpu_inc(bpf_task_storage_busy);
+}
+
+static void bpf_task_storage_unlock(void)
+{
+	__this_cpu_dec(bpf_task_storage_busy);
+	migrate_enable();
+}
+
+static bool bpf_task_storage_trylock(void)
+{
+	migrate_disable();
+	if (unlikely(__this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
+		__this_cpu_dec(bpf_task_storage_busy);
+		migrate_enable();
+		return false;
+	}
+	return true;
+}
+
 static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
 {
 	struct task_struct *task = owner;
@@ -67,6 +92,7 @@  void bpf_task_storage_free(struct task_struct *task)
 	 * when unlinking elem from the local_storage->list and
 	 * the map's bucket->list.
 	 */
+	bpf_task_storage_lock();
 	raw_spin_lock_irqsave(&local_storage->lock, flags);
 	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
 		/* Always unlink from map before unlinking from
@@ -77,6 +103,7 @@  void bpf_task_storage_free(struct task_struct *task)
 			local_storage, selem, false);
 	}
 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+	bpf_task_storage_unlock();
 	rcu_read_unlock();
 
 	/* free_task_storage should always be true as long as
@@ -109,7 +136,9 @@  static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
 		goto out;
 	}
 
+	bpf_task_storage_lock();
 	sdata = task_storage_lookup(task, map, true);
+	bpf_task_storage_unlock();
 	put_pid(pid);
 	return sdata ? sdata->data : NULL;
 out:
@@ -141,8 +170,10 @@  static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
 		goto out;
 	}
 
+	bpf_task_storage_lock();
 	sdata = bpf_local_storage_update(
 		task, (struct bpf_local_storage_map *)map, value, map_flags);
+	bpf_task_storage_unlock();
 
 	err = PTR_ERR_OR_ZERO(sdata);
 out:
@@ -185,7 +216,9 @@  static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
 		goto out;
 	}
 
+	bpf_task_storage_lock();
 	err = task_storage_delete(task, map);
+	bpf_task_storage_unlock();
 out:
 	put_pid(pid);
 	return err;
@@ -207,34 +240,44 @@  BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
 	if (!task || !task_storage_ptr(task))
 		return (unsigned long)NULL;
 
+	if (!bpf_task_storage_trylock())
+		return (unsigned long)NULL;
+
 	sdata = task_storage_lookup(task, map, true);
 	if (sdata)
-		return (unsigned long)sdata->data;
+		goto unlock;
 
 	/* only allocate new storage, when the task is refcounted */
 	if (refcount_read(&task->usage) &&
-	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) {
+	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
 		sdata = bpf_local_storage_update(
 			task, (struct bpf_local_storage_map *)map, value,
 			BPF_NOEXIST);
-		return IS_ERR(sdata) ? (unsigned long)NULL :
-					     (unsigned long)sdata->data;
-	}
 
-	return (unsigned long)NULL;
+unlock:
+	bpf_task_storage_unlock();
+	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL :
+		(unsigned long)sdata->data;
 }
 
 BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
 	   task)
 {
+	int ret;
+
 	if (!task)
 		return -EINVAL;
 
+	if (!bpf_task_storage_trylock())
+		return -EBUSY;
+
 	/* This helper must only be called from places where the lifetime of the task
 	 * is guaranteed. Either by being refcounted or by being protected
 	 * by an RCU read-side critical section.
 	 */
-	return task_storage_delete(task, map);
+	ret = task_storage_delete(task, map);
+	bpf_task_storage_unlock();
+	return ret;
 }
 
 static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)