Message ID: 20210310104139.679618-1-elver@google.com
Series: Add support for synchronous signals on perf events
On Wed, Mar 10, 2021 at 11:41AM +0100, Marco Elver wrote: > Adds bit perf_event_attr::remove_on_exec, to support removing an event > from a task on exec. > > This option supports the case where an event is supposed to be > process-wide only, and should not propagate beyond exec, to limit > monitoring to the original process image only. [...] > +static void perf_remove_from_owner(struct perf_event *event); > +static void perf_event_exit_event(struct perf_event *child_event, > + struct perf_event_context *child_ctx, > + struct task_struct *child); > + > +/* > + * Removes all events from the current task that have been marked > + * remove-on-exec, and feeds their values back to parent events. > + */ > +static void perf_event_remove_on_exec(void) > +{ > + int ctxn; > + > + for_each_task_context_nr(ctxn) { > + struct perf_event_context *ctx; > + struct perf_event *event, *next; > + > + ctx = perf_pin_task_context(current, ctxn); > + if (!ctx) > + continue; > + mutex_lock(&ctx->mutex); > + > + list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { > + if (!event->attr.remove_on_exec) > + continue; > + > + if (!is_kernel_event(event)) > + perf_remove_from_owner(event); > + perf_remove_from_context(event, DETACH_GROUP); > + /* > + * Remove the event and feed back its values to the > + * parent event. > + */ > + perf_event_exit_event(event, ctx, current); > + } > + mutex_unlock(&ctx->mutex); > + put_ctx(ctx); > + } > +} Yikes; it seems this is somehow broken. I just decided to run the remove_on_exec kselftest in a loop like so: for x in {1..10}; do ( tools/testing/selftests/perf_events/remove_on_exec & ) ; done While the kselftest runs pass, I see a number of kernel warnings (below). Any suggestions? I'll go and try to debug this... Thanks, -- Marco ------ >8 ------ hardirqs last disabled at (4150): [<ffffffffa633219b>] sysvec_call_function_single+0xb/0xc0 arch/x86/kernel/smp.c:243 softirqs last enabled at (3846): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 softirqs last disabled at (3839): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 ---[ end trace 74c79be9940ec2d1 ]--- ------------[ cut here ]------------ WARNING: CPU: 3 PID: 1369 at kernel/events/core.c:247 event_function+0xef/0x100 kernel/events/core.c:249 Modules linked in: CPU: 3 PID: 1369 Comm: exe Tainted: G W 5.12.0-rc2+ #19 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 RIP: 0010:event_function+0xef/0x100 kernel/events/core.c:247 Code: 5b 5d 41 5c 41 5d 41 5e 41 5f c3 65 8b 05 a5 79 88 5a 85 c0 0f 84 6e ff ff ff 0f 0b e9 67 ff ff ff 4c 39 f5 74 a7 0f 0b eb a3 <0f> 0b eb 9f 0f 0b eb 96 41 bd fd ff ff ff eb ac 90 48 8b 47 10 48 RSP: 0000:ffff980880158f70 EFLAGS: 00010086 RAX: 0000000000000000 RBX: ffff98088111fde0 RCX: 944f9e9405e234a1 RDX: ffff8a5d4d2ac340 RSI: ffffffffa6b4ccef RDI: ffff8a606fcf0c08 RBP: ffff8a606fcf0c00 R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000000 R11: ffff8a5d4d2accb8 R12: 0000000000000000 R13: ffff8a5d4e6db800 R14: ffff8a5d46534a00 R15: ffff8a606fcf0c08 FS: 0000000000000000(0000) GS:ffff8a606fcc0000(0000) 
knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fd2b331e225 CR3: 00000001e0e22006 CR4: 0000000000770ee0 DR0: 0000564596006388 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000600 PKRU: 55555554 Call Trace: <IRQ> remote_function kernel/events/core.c:91 [inline] remote_function+0x44/0x50 kernel/events/core.c:71 flush_smp_call_function_queue+0x13a/0x1d0 kernel/smp.c:395 __sysvec_call_function_single+0x3e/0x1c0 arch/x86/kernel/smp.c:248 sysvec_call_function_single+0x89/0xc0 arch/x86/kernel/smp.c:243 </IRQ> asm_sysvec_call_function_single+0x12/0x20 arch/x86/include/asm/idtentry.h:640 RIP: 0010:lock_page_memcg+0xc7/0x170 mm/memcontrol.c:2157 Code: 00 00 e8 6c ae e9 ff 48 c7 c6 d3 07 83 a5 58 4c 89 f7 e8 6c ab e9 ff 48 85 db 74 06 e8 22 e1 f3 ff fb 41 8b 84 24 00 0b 00 00 <85> c0 7e a7 4d 8d b4 24 70 06 00 00 4c 89 f7 e8 85 b2 b0 00 48 89 RSP: 0000:ffff980881bc7b38 EFLAGS: 00000206 RAX: 0000000000000000 RBX: 0000000000000200 RCX: 0000000000000006 RDX: 0000000000000000 RSI: ffffffffa6c1a6ed RDI: ffffffffa6b9ab37 RBP: ffffccff47891b80 R08: 0000000000000001 R09: 0000000000000001 R10: 0000000000000000 R11: ffff8a5d4d2accb8 R12: ffff8a5d403e9000 R13: ffffffffa58307d3 R14: ffff8a5d403e9688 R15: ffff8a5d47067128 page_remove_rmap+0xc/0xb0 mm/rmap.c:1348 zap_pte_range mm/memory.c:1276 [inline] zap_pmd_range mm/memory.c:1380 [inline] zap_pud_range mm/memory.c:1409 [inline] zap_p4d_range mm/memory.c:1430 [inline] unmap_page_range+0x612/0xb00 mm/memory.c:1451 unmap_vmas+0xbe/0x150 mm/memory.c:1528 exit_mmap+0x8f/0x1d0 mm/mmap.c:3218 __mmput kernel/fork.c:1082 [inline] mmput+0x3c/0xe0 kernel/fork.c:1103 exit_mm kernel/exit.c:501 [inline] do_exit+0x369/0xb60 kernel/exit.c:812 do_group_exit+0x34/0xb0 kernel/exit.c:922 get_signal+0x170/0xc80 kernel/signal.c:2775 arch_do_signal_or_restart+0xea/0x740 arch/x86/kernel/signal.c:811 handle_signal_work kernel/entry/common.c:147 [inline] exit_to_user_mode_loop kernel/entry/common.c:171 [inline] exit_to_user_mode_prepare+0x10f/0x190 kernel/entry/common.c:208 irqentry_exit_to_user_mode+0x5/0x30 kernel/entry/common.c:314 asm_sysvec_reschedule_ipi+0x12/0x20 arch/x86/include/asm/idtentry.h:637 RIP: 0033:0x5598fc00409b Code: Unable to access opcode bytes at RIP 0x5598fc004071. 
RSP: 002b:00007ffe94151cf0 EFLAGS: 00000246 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 00007f6db39331b0 RDX: 0000000000000004 RSI: 00007ffe94151cfc RDI: 0000000000000001 RBP: 00007ffe94151da0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000059 R11: 0000000000000246 R12: 00005598fc0010d0 R13: 00007ffe94151ea0 R14: 0000000000000000 R15: 0000000000000000 irq event stamp: 4150 hardirqs last enabled at (4149): [<ffffffffa583080e>] lock_page_memcg+0xbe/0x170 mm/memcontrol.c:2154 hardirqs last disabled at (4150): [<ffffffffa633219b>] sysvec_call_function_single+0xb/0xc0 arch/x86/kernel/smp.c:243 softirqs last enabled at (3846): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 softirqs last disabled at (3839): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 ---[ end trace 74c79be9940ec2d2 ]--- ------------[ cut here ]------------ WARNING: CPU: 3 PID: 1369 at kernel/events/core.c:2253 event_sched_out+0x4c/0x200 kernel/events/core.c:2253 Modules linked in: CPU: 3 PID: 1369 Comm: exe Tainted: G W 5.12.0-rc2+ #19 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 RIP: 0010:event_sched_out+0x4c/0x200 kernel/events/core.c:2253 Code: 92 01 85 c9 75 12 83 bb a8 00 00 00 01 74 26 5b 5d 41 5c 41 5d 41 5e c3 48 8d 7d 20 be ff ff ff ff e8 18 cd b9 00 85 c0 75 dc <0f> 0b 83 bb a8 00 00 00 01 75 da 48 8b 53 28 48 8b 4b 20 48 8d 43 RSP: 0000:ffff980880158f18 EFLAGS: 00010046 RAX: 0000000000000000 RBX: ffff8a5d4e6db800 RCX: 0000000000000001 RDX: 0000000000000000 RSI: ffffffffa6b4ccef RDI: ffffffffa6b9ab37 RBP: ffff8a5d46534a00 R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000000 R11: ffff8a5d4d2accb8 R12: ffff8a606fcf0c00 R13: ffff8a606fcf0c00 R14: ffff8a5d46534a00 R15: ffff8a606fcf0c08 FS: 0000000000000000(0000) GS:ffff8a606fcc0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fd2b331e225 CR3: 00000001e0e22006 CR4: 0000000000770ee0 DR0: 0000564596006388 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000600 PKRU: 55555554 Call Trace: <IRQ> __perf_remove_from_context+0x29/0xd0 kernel/events/core.c:2333 event_function+0xab/0x100 kernel/events/core.c:252 remote_function kernel/events/core.c:91 [inline] remote_function+0x44/0x50 kernel/events/core.c:71 flush_smp_call_function_queue+0x13a/0x1d0 kernel/smp.c:395 __sysvec_call_function_single+0x3e/0x1c0 arch/x86/kernel/smp.c:248 sysvec_call_function_single+0x89/0xc0 arch/x86/kernel/smp.c:243 </IRQ> asm_sysvec_call_function_single+0x12/0x20 arch/x86/include/asm/idtentry.h:640 RIP: 0010:lock_page_memcg+0xc7/0x170 mm/memcontrol.c:2157 Code: 00 00 e8 6c ae e9 ff 48 c7 c6 d3 07 83 a5 58 4c 89 f7 e8 6c ab e9 ff 48 85 db 74 06 e8 22 e1 f3 ff fb 41 8b 84 24 00 0b 00 00 <85> c0 7e a7 4d 8d b4 24 70 06 00 00 4c 89 f7 e8 85 b2 b0 00 48 89 RSP: 0000:ffff980881bc7b38 EFLAGS: 00000206 RAX: 0000000000000000 RBX: 0000000000000200 RCX: 0000000000000006 RDX: 0000000000000000 RSI: ffffffffa6c1a6ed RDI: ffffffffa6b9ab37 RBP: ffffccff47891b80 R08: 0000000000000001 R09: 0000000000000001 R10: 0000000000000000 
R11: ffff8a5d4d2accb8 R12: ffff8a5d403e9000 R13: ffffffffa58307d3 R14: ffff8a5d403e9688 R15: ffff8a5d47067128 page_remove_rmap+0xc/0xb0 mm/rmap.c:1348 zap_pte_range mm/memory.c:1276 [inline] zap_pmd_range mm/memory.c:1380 [inline] zap_pud_range mm/memory.c:1409 [inline] zap_p4d_range mm/memory.c:1430 [inline] unmap_page_range+0x612/0xb00 mm/memory.c:1451 unmap_vmas+0xbe/0x150 mm/memory.c:1528 exit_mmap+0x8f/0x1d0 mm/mmap.c:3218 __mmput kernel/fork.c:1082 [inline] mmput+0x3c/0xe0 kernel/fork.c:1103 exit_mm kernel/exit.c:501 [inline] do_exit+0x369/0xb60 kernel/exit.c:812 do_group_exit+0x34/0xb0 kernel/exit.c:922 get_signal+0x170/0xc80 kernel/signal.c:2775 arch_do_signal_or_restart+0xea/0x740 arch/x86/kernel/signal.c:811 handle_signal_work kernel/entry/common.c:147 [inline] exit_to_user_mode_loop kernel/entry/common.c:171 [inline] exit_to_user_mode_prepare+0x10f/0x190 kernel/entry/common.c:208 irqentry_exit_to_user_mode+0x5/0x30 kernel/entry/common.c:314 asm_sysvec_reschedule_ipi+0x12/0x20 arch/x86/include/asm/idtentry.h:637 RIP: 0033:0x5598fc00409b Code: Unable to access opcode bytes at RIP 0x5598fc004071. RSP: 002b:00007ffe94151cf0 EFLAGS: 00000246 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 00007f6db39331b0 RDX: 0000000000000004 RSI: 00007ffe94151cfc RDI: 0000000000000001 RBP: 00007ffe94151da0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000059 R11: 0000000000000246 R12: 00005598fc0010d0 R13: 00007ffe94151ea0 R14: 0000000000000000 R15: 0000000000000000 irq event stamp: 4150 hardirqs last enabled at (4149): [<ffffffffa583080e>] lock_page_memcg+0xbe/0x170 mm/memcontrol.c:2154 hardirqs last disabled at (4150): [<ffffffffa633219b>] sysvec_call_function_single+0xb/0xc0 arch/x86/kernel/smp.c:243 softirqs last enabled at (3846): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 softirqs last disabled at (3839): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 ---[ end trace 74c79be9940ec2d3 ]--- ------------[ cut here ]------------ WARNING: CPU: 3 PID: 1369 at kernel/events/core.c:2152 perf_group_detach+0xe1/0x300 kernel/events/core.c:2152 Modules linked in: CPU: 3 PID: 1369 Comm: exe Tainted: G W 5.12.0-rc2+ #19 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 RIP: 0010:perf_group_detach+0xe1/0x300 kernel/events/core.c:2152 Code: 41 5c 41 5d 41 5e 41 5f e9 bc 54 ff ff 48 8b 87 20 02 00 00 be ff ff ff ff 48 8d 78 20 e8 27 88 b9 00 85 c0 0f 85 41 ff ff ff <0f> 0b e9 3a ff ff ff 48 8b 45 10 4c 8b 28 48 8d 58 f0 49 83 ed 10 RSP: 0000:ffff980880158f10 EFLAGS: 00010046 RAX: 0000000000000000 RBX: ffff8a5d4e6db800 RCX: 0000000000000001 RDX: 0000000000000000 RSI: ffffffffa6b4ccef RDI: ffffffffa6b9ab37 RBP: ffff8a5d4e6db800 R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000000 R11: ffff8a5d4d2accb8 R12: ffff8a606fcf0c00 R13: 0000000000000001 R14: ffff8a5d46534a00 R15: ffff8a606fcf0c08 FS: 0000000000000000(0000) GS:ffff8a606fcc0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fd2b331e225 CR3: 00000001e0e22006 CR4: 0000000000770ee0 DR0: 0000564596006388 DR1: 0000000000000000 DR2: 
0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000600 PKRU: 55555554 Call Trace: <IRQ> __perf_remove_from_context+0x91/0xd0 kernel/events/core.c:2335 event_function+0xab/0x100 kernel/events/core.c:252 remote_function kernel/events/core.c:91 [inline] remote_function+0x44/0x50 kernel/events/core.c:71 flush_smp_call_function_queue+0x13a/0x1d0 kernel/smp.c:395 __sysvec_call_function_single+0x3e/0x1c0 arch/x86/kernel/smp.c:248 sysvec_call_function_single+0x89/0xc0 arch/x86/kernel/smp.c:243 </IRQ> asm_sysvec_call_function_single+0x12/0x20 arch/x86/include/asm/idtentry.h:640 RIP: 0010:lock_page_memcg+0xc7/0x170 mm/memcontrol.c:2157 Code: 00 00 e8 6c ae e9 ff 48 c7 c6 d3 07 83 a5 58 4c 89 f7 e8 6c ab e9 ff 48 85 db 74 06 e8 22 e1 f3 ff fb 41 8b 84 24 00 0b 00 00 <85> c0 7e a7 4d 8d b4 24 70 06 00 00 4c 89 f7 e8 85 b2 b0 00 48 89 RSP: 0000:ffff980881bc7b38 EFLAGS: 00000206 RAX: 0000000000000000 RBX: 0000000000000200 RCX: 0000000000000006 RDX: 0000000000000000 RSI: ffffffffa6c1a6ed RDI: ffffffffa6b9ab37 RBP: ffffccff47891b80 R08: 0000000000000001 R09: 0000000000000001 R10: 0000000000000000 R11: ffff8a5d4d2accb8 R12: ffff8a5d403e9000 R13: ffffffffa58307d3 R14: ffff8a5d403e9688 R15: ffff8a5d47067128 page_remove_rmap+0xc/0xb0 mm/rmap.c:1348 zap_pte_range mm/memory.c:1276 [inline] zap_pmd_range mm/memory.c:1380 [inline] zap_pud_range mm/memory.c:1409 [inline] zap_p4d_range mm/memory.c:1430 [inline] unmap_page_range+0x612/0xb00 mm/memory.c:1451 unmap_vmas+0xbe/0x150 mm/memory.c:1528 exit_mmap+0x8f/0x1d0 mm/mmap.c:3218 __mmput kernel/fork.c:1082 [inline] mmput+0x3c/0xe0 kernel/fork.c:1103 exit_mm kernel/exit.c:501 [inline] do_exit+0x369/0xb60 kernel/exit.c:812 do_group_exit+0x34/0xb0 kernel/exit.c:922 get_signal+0x170/0xc80 kernel/signal.c:2775 arch_do_signal_or_restart+0xea/0x740 arch/x86/kernel/signal.c:811 handle_signal_work kernel/entry/common.c:147 [inline] exit_to_user_mode_loop kernel/entry/common.c:171 [inline] exit_to_user_mode_prepare+0x10f/0x190 kernel/entry/common.c:208 irqentry_exit_to_user_mode+0x5/0x30 kernel/entry/common.c:314 asm_sysvec_reschedule_ipi+0x12/0x20 arch/x86/include/asm/idtentry.h:637 RIP: 0033:0x5598fc00409b Code: Unable to access opcode bytes at RIP 0x5598fc004071. 
RSP: 002b:00007ffe94151cf0 EFLAGS: 00000246 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 00007f6db39331b0 RDX: 0000000000000004 RSI: 00007ffe94151cfc RDI: 0000000000000001 RBP: 00007ffe94151da0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000059 R11: 0000000000000246 R12: 00005598fc0010d0 R13: 00007ffe94151ea0 R14: 0000000000000000 R15: 0000000000000000 irq event stamp: 4150 hardirqs last enabled at (4149): [<ffffffffa583080e>] lock_page_memcg+0xbe/0x170 mm/memcontrol.c:2154 hardirqs last disabled at (4150): [<ffffffffa633219b>] sysvec_call_function_single+0xb/0xc0 arch/x86/kernel/smp.c:243 softirqs last enabled at (3846): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 softirqs last disabled at (3839): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 ---[ end trace 74c79be9940ec2d4 ]--- ------------[ cut here ]------------ WARNING: CPU: 3 PID: 1369 at kernel/events/core.c:1993 list_del_event+0xaf/0x110 kernel/events/core.c:1993 Modules linked in: CPU: 3 PID: 1369 Comm: exe Tainted: G W 5.12.0-rc2+ #19 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 RIP: 0010:list_del_event+0xaf/0x110 kernel/events/core.c:1993 Code: 00 00 01 eb ba be ff ff ff ff 48 89 ef e8 b9 fe ff ff eb db 48 8d 7b 20 be ff ff ff ff e8 39 1d ba 00 85 c0 0f 85 72 ff ff ff <0f> 0b e9 6b ff ff ff 48 8d 83 e8 00 00 00 f6 85 08 01 00 00 04 48 RSP: 0000:ffff980880158f28 EFLAGS: 00010046 RAX: 0000000000000000 RBX: ffff8a5d46534a00 RCX: 0000000000000001 RDX: 0000000000000000 RSI: ffffffffa6b4ccef RDI: ffffffffa6b9ab37 RBP: ffff8a5d4e6db800 R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000000 R11: ffff8a5d4d2accb8 R12: ffff8a606fcf0c00 R13: 0000000000000001 R14: ffff8a5d46534a00 R15: ffff8a606fcf0c08 FS: 0000000000000000(0000) GS:ffff8a606fcc0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fd2b331e225 CR3: 00000001e0e22006 CR4: 0000000000770ee0 DR0: 0000564596006388 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000600 PKRU: 55555554 Call Trace: <IRQ> __perf_remove_from_context+0x3a/0xd0 kernel/events/core.c:2336 event_function+0xab/0x100 kernel/events/core.c:252 remote_function kernel/events/core.c:91 [inline] remote_function+0x44/0x50 kernel/events/core.c:71 flush_smp_call_function_queue+0x13a/0x1d0 kernel/smp.c:395 __sysvec_call_function_single+0x3e/0x1c0 arch/x86/kernel/smp.c:248 sysvec_call_function_single+0x89/0xc0 arch/x86/kernel/smp.c:243 </IRQ> asm_sysvec_call_function_single+0x12/0x20 arch/x86/include/asm/idtentry.h:640 RIP: 0010:lock_page_memcg+0xc7/0x170 mm/memcontrol.c:2157 Code: 00 00 e8 6c ae e9 ff 48 c7 c6 d3 07 83 a5 58 4c 89 f7 e8 6c ab e9 ff 48 85 db 74 06 e8 22 e1 f3 ff fb 41 8b 84 24 00 0b 00 00 <85> c0 7e a7 4d 8d b4 24 70 06 00 00 4c 89 f7 e8 85 b2 b0 00 48 89 RSP: 0000:ffff980881bc7b38 EFLAGS: 00000206 RAX: 0000000000000000 RBX: 0000000000000200 RCX: 0000000000000006 RDX: 0000000000000000 RSI: ffffffffa6c1a6ed RDI: ffffffffa6b9ab37 RBP: ffffccff47891b80 R08: 0000000000000001 R09: 0000000000000001 R10: 0000000000000000 
R11: ffff8a5d4d2accb8 R12: ffff8a5d403e9000 R13: ffffffffa58307d3 R14: ffff8a5d403e9688 R15: ffff8a5d47067128 page_remove_rmap+0xc/0xb0 mm/rmap.c:1348 zap_pte_range mm/memory.c:1276 [inline] zap_pmd_range mm/memory.c:1380 [inline] zap_pud_range mm/memory.c:1409 [inline] zap_p4d_range mm/memory.c:1430 [inline] unmap_page_range+0x612/0xb00 mm/memory.c:1451 unmap_vmas+0xbe/0x150 mm/memory.c:1528 exit_mmap+0x8f/0x1d0 mm/mmap.c:3218 __mmput kernel/fork.c:1082 [inline] mmput+0x3c/0xe0 kernel/fork.c:1103 exit_mm kernel/exit.c:501 [inline] do_exit+0x369/0xb60 kernel/exit.c:812 do_group_exit+0x34/0xb0 kernel/exit.c:922 get_signal+0x170/0xc80 kernel/signal.c:2775 arch_do_signal_or_restart+0xea/0x740 arch/x86/kernel/signal.c:811 handle_signal_work kernel/entry/common.c:147 [inline] exit_to_user_mode_loop kernel/entry/common.c:171 [inline] exit_to_user_mode_prepare+0x10f/0x190 kernel/entry/common.c:208 irqentry_exit_to_user_mode+0x5/0x30 kernel/entry/common.c:314 asm_sysvec_reschedule_ipi+0x12/0x20 arch/x86/include/asm/idtentry.h:637 RIP: 0033:0x5598fc00409b Code: Unable to access opcode bytes at RIP 0x5598fc004071. RSP: 002b:00007ffe94151cf0 EFLAGS: 00000246 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 00007f6db39331b0 RDX: 0000000000000004 RSI: 00007ffe94151cfc RDI: 0000000000000001 RBP: 00007ffe94151da0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000059 R11: 0000000000000246 R12: 00005598fc0010d0 R13: 00007ffe94151ea0 R14: 0000000000000000 R15: 0000000000000000 irq event stamp: 4150 hardirqs last enabled at (4149): [<ffffffffa583080e>] lock_page_memcg+0xbe/0x170 mm/memcontrol.c:2154 hardirqs last disabled at (4150): [<ffffffffa633219b>] sysvec_call_function_single+0xb/0xc0 arch/x86/kernel/smp.c:243 softirqs last enabled at (3846): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last enabled at (3846): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 softirqs last disabled at (3839): [<ffffffffa566f621>] invoke_softirq kernel/softirq.c:221 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] __irq_exit_rcu kernel/softirq.c:422 [inline] softirqs last disabled at (3839): [<ffffffffa566f621>] irq_exit_rcu+0xe1/0x120 kernel/softirq.c:434 ---[ end trace 74c79be9940ec2d5 ]---
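[Editorial note: for readers joining the thread, the userspace contract under discussion is roughly the following. This is an illustrative sketch, not the series' kselftest; it assumes a <linux/perf_event.h> patched with the proposed perf_event_attr::remove_on_exec bit, and error handling is elided.]

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * Open a counter on the calling thread that children inherit across
 * fork(), but that is torn down (and its value fed back to the parent
 * event) when the task calls exec().
 */
static int open_counter_remove_on_exec(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.inherit = 1;		/* propagate to children on fork() */
	attr.remove_on_exec = 1;	/* proposed bit: stop at the exec() boundary */

	/* measure the calling thread, on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}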
On Wed, Mar 10, 2021 at 11:41:34AM +0100, Marco Elver wrote:
> Adds bit perf_event_attr::remove_on_exec, to support removing an event
> from a task on exec.
>
> This option supports the case where an event is supposed to be
> process-wide only, and should not propagate beyond exec, to limit
> monitoring to the original process image only.
>
> Signed-off-by: Marco Elver <elver@google.com>

> +/*
> + * Removes all events from the current task that have been marked
> + * remove-on-exec, and feeds their values back to parent events.
> + */
> +static void perf_event_remove_on_exec(void)
> +{
> +	int ctxn;
> +
> +	for_each_task_context_nr(ctxn) {
> +		struct perf_event_context *ctx;
> +		struct perf_event *event, *next;
> +
> +		ctx = perf_pin_task_context(current, ctxn);
> +		if (!ctx)
> +			continue;
> +		mutex_lock(&ctx->mutex);
> +
> +		list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
> +			if (!event->attr.remove_on_exec)
> +				continue;
> +
> +			if (!is_kernel_event(event))
> +				perf_remove_from_owner(event);
> +			perf_remove_from_context(event, DETACH_GROUP);

There's a comment on this in perf_event_exit_event(): if this task
happens to have the original event, then DETACH_GROUP will destroy the
grouping. I think this wants to be:

	perf_remove_from_context(event,
				 child_event->parent ? DETACH_GROUP : 0);

or something.

> +			/*
> +			 * Remove the event and feed back its values to the
> +			 * parent event.
> +			 */
> +			perf_event_exit_event(event, ctx, current);

Oooh, and here we call it... but it will do list_del_event() /
perf_group_detach() *again*.

So the problem is that perf_event_exit_task_context() doesn't use
perf_remove_from_context(), but instead does task_ctx_sched_out() and
then relies on the events not being active.

Whereas above you *DO* use perf_remove_from_context(), but then
perf_event_exit_event() will try to remove it again.

> +		}
> +		mutex_unlock(&ctx->mutex);

	perf_unpin_context(ctx);

> +		put_ctx(ctx);
> +	}
> +}
On Tue, Mar 16, 2021 at 05:22PM +0100, Peter Zijlstra wrote:
> On Wed, Mar 10, 2021 at 11:41:34AM +0100, Marco Elver wrote:
> > Adds bit perf_event_attr::remove_on_exec, to support removing an event
> > from a task on exec.
> >
> > This option supports the case where an event is supposed to be
> > process-wide only, and should not propagate beyond exec, to limit
> > monitoring to the original process image only.
> >
> > Signed-off-by: Marco Elver <elver@google.com>
>
> > +/*
> > + * Removes all events from the current task that have been marked
> > + * remove-on-exec, and feeds their values back to parent events.
> > + */
> > +static void perf_event_remove_on_exec(void)
> > +{
> > +	int ctxn;
> > +
> > +	for_each_task_context_nr(ctxn) {
> > +		struct perf_event_context *ctx;
> > +		struct perf_event *event, *next;
> > +
> > +		ctx = perf_pin_task_context(current, ctxn);
> > +		if (!ctx)
> > +			continue;
> > +		mutex_lock(&ctx->mutex);
> > +
> > +		list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
> > +			if (!event->attr.remove_on_exec)
> > +				continue;
> > +
> > +			if (!is_kernel_event(event))
> > +				perf_remove_from_owner(event);
> > +			perf_remove_from_context(event, DETACH_GROUP);
>
> There's a comment on this in perf_event_exit_event(): if this task
> happens to have the original event, then DETACH_GROUP will destroy the
> grouping. I think this wants to be:
>
> 	perf_remove_from_context(event,
> 				 child_event->parent ? DETACH_GROUP : 0);
>
> or something.
>
> > +			/*
> > +			 * Remove the event and feed back its values to the
> > +			 * parent event.
> > +			 */
> > +			perf_event_exit_event(event, ctx, current);
>
> Oooh, and here we call it... but it will do list_del_event() /
> perf_group_detach() *again*.
>
> So the problem is that perf_event_exit_task_context() doesn't use
> perf_remove_from_context(), but instead does task_ctx_sched_out() and
> then relies on the events not being active.
>
> Whereas above you *DO* use perf_remove_from_context(), but then
> perf_event_exit_event() will try to remove it again.

AFAIK, we want to deallocate the events and not just remove them, so
doing what perf_event_exit_event() does is the right way forward? Or did
you have something else in mind?

I'm still trying to make sense of the zoo of synchronisation mechanisms
at play here. No matter what I try, it seems I get stuck on the fact
that I can't cleanly "pause" the context to remove the events (warnings
in event_function()).

This is what I've been playing with to understand:

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 450ea9415ed7..c585cef284a0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4195,6 +4195,88 @@ static void perf_event_enable_on_exec(int ctxn)
 	put_ctx(clone_ctx);
 }
 
+static void perf_remove_from_owner(struct perf_event *event);
+static void perf_event_exit_event(struct perf_event *child_event,
+				  struct perf_event_context *child_ctx,
+				  struct task_struct *child);
+
+/*
+ * Removes all events from the current task that have been marked
+ * remove-on-exec, and feeds their values back to parent events.
+ */
+static void perf_event_remove_on_exec(void)
+{
+	struct perf_event *event, *next;
+	int ctxn;
+
+	/***************** BROKEN BROKEN BROKEN *****************/
+
+	for_each_task_context_nr(ctxn) {
+		struct perf_event_context *ctx;
+		bool removed = false;
+
+		ctx = perf_pin_task_context(current, ctxn);
+		if (!ctx)
+			continue;
+		mutex_lock(&ctx->mutex);
+
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * WIP: Ok, we will unschedule the context, _and_ tell everyone
+		 * still trying to use that it's dead... even though it isn't.
+		 *
+		 * This can't be right...
+		 */
+		task_ctx_sched_out(__get_cpu_context(ctx), ctx, EVENT_ALL);
+		RCU_INIT_POINTER(current->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);

This code here is obviously bogus, because it removes the context from
the task: we might still need it since this task is not dead yet. What's
the right way to pause the context to remove the events from it?

+		raw_spin_unlock_irq(&ctx->lock);
+
+		list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
+			if (!event->attr.remove_on_exec)
+				continue;
+			removed = true;
+
+			if (!is_kernel_event(event))
+				perf_remove_from_owner(event);
+
+			/*
+			 * WIP: Want to free the event and feed back its values
+			 * to the parent (if any) ...
+			 */
+			perf_event_exit_event(event, ctx, current);
+		}

... need to schedule context back in here?

+
+		mutex_unlock(&ctx->mutex);
+		perf_unpin_context(ctx);
+		put_ctx(ctx);
+	}
+}
+
 struct perf_read_data {
 	struct perf_event *event;
 	bool group;
@@ -7553,6 +7635,8 @@ void perf_event_exec(void)
 				   true);
 	}
 	rcu_read_unlock();
+
+	perf_event_remove_on_exec();
 }

Thanks,
-- Marco
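[Editorial note: one recurring detail in these attempts is that perf_pin_task_context() takes a reference on the context and bumps ctx->pin_count, so every exit path needs the matching unpin — the later versions in this thread inline it as the --ctx->pin_count under ctx->lock. For orientation only (not part of any patch here), the stock helper in kernel/events/core.c of this era looks roughly like this:]

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;	/* drop the pin taken by perf_pin_task_context() */
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}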
On Wed, Mar 10, 2021 at 11:41AM +0100, Marco Elver wrote:
> Add kselftest to test that remove_on_exec removes inherited events from
> child tasks.
>
> Signed-off-by: Marco Elver <elver@google.com>

To make compatible with more recent libc, we'll need to fixup the tests
with the below.

Also, I've seen that tools/perf/tests exists, however it seems to be
primarily about perf-tool related tests. Is this correct?

I'd propose to keep these purely kernel ABI related tests separate, and
that way we can also make use of the kselftests framework which will
also integrate into various CI systems such as kernelci.org.

Thanks,
-- Marco

------ >8 ------

diff --git a/tools/testing/selftests/perf_events/remove_on_exec.c b/tools/testing/selftests/perf_events/remove_on_exec.c
index e176b3a74d55..f89d0cfdb81e 100644
--- a/tools/testing/selftests/perf_events/remove_on_exec.c
+++ b/tools/testing/selftests/perf_events/remove_on_exec.c
@@ -13,6 +13,11 @@
 #define __have_siginfo_t 1
 #define __have_sigval_t 1
 #define __have_sigevent_t 1
+#define __siginfo_t_defined
+#define __sigval_t_defined
+#define __sigevent_t_defined
+#define _BITS_SIGINFO_CONSTS_H 1
+#define _BITS_SIGEVENT_CONSTS_H 1
 
 #include <linux/perf_event.h>
 #include <pthread.h>
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
index 7ebb9bb34c2e..b9a7d4b64b3c 100644
--- a/tools/testing/selftests/perf_events/sigtrap_threads.c
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -13,6 +13,11 @@
 #define __have_siginfo_t 1
 #define __have_sigval_t 1
 #define __have_sigevent_t 1
+#define __siginfo_t_defined
+#define __sigval_t_defined
+#define __sigevent_t_defined
+#define _BITS_SIGINFO_CONSTS_H 1
+#define _BITS_SIGEVENT_CONSTS_H 1
 
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
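[Editorial note: for anyone wanting to reproduce, with the fixup applied the tests build and run through the usual kselftest targets, along the lines of the following; the exact invocation can vary by tree.]

	make -C tools/testing/selftests TARGETS=perf_events
	make -C tools/testing/selftests TARGETS=perf_events run_tests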
On Mon, Mar 22, 2021 at 02:24:40PM +0100, Marco Elver wrote:
> To make compatible with more recent libc, we'll need to fixup the tests
> with the below.

OK, that reproduced things here, thanks!

The below seems to not explode instantly... it still has the
alternative version in as well (and I think it might even work too, but
the one I left in seems simpler).

---
 kernel/events/core.c | 154 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 111 insertions(+), 43 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index a7220e8c447e..8c0f905cc017 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2167,8 +2172,9 @@ static void perf_group_detach(struct perf_event *event)
 	 * If this is a sibling, remove it from its group.
 	 */
 	if (leader != event) {
+		leader->nr_siblings--;
 		list_del_init(&event->sibling_list);
-		event->group_leader->nr_siblings--;
+		event->group_leader = event;
 		goto out;
 	}
 
@@ -2182,8 +2188,9 @@ static void perf_group_detach(struct perf_event *event)
 		if (sibling->event_caps & PERF_EV_CAP_SIBLING)
 			perf_remove_sibling_event(sibling);
 
-		sibling->group_leader = sibling;
+		leader->nr_siblings--;
 		list_del_init(&sibling->sibling_list);
+		sibling->group_leader = sibling;
 
 		/* Inherit group flags from the previous leader */
 		sibling->group_caps = event->group_caps;
 
@@ -2360,10 +2367,19 @@ __perf_remove_from_context(struct perf_event *event,
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
 	struct perf_event_context *ctx = event->ctx;
+	bool remove;
 
 	lockdep_assert_held(&ctx->mutex);
 
-	event_function_call(event, __perf_remove_from_context, (void *)flags);
+	/*
+	 * There is concurrency vs remove_on_exec().
+	 */
+	raw_spin_lock_irq(&ctx->lock);
+	remove = (event->attach_state & PERF_ATTACH_CONTEXT);
+	raw_spin_unlock_irq(&ctx->lock);
+
+	if (remove)
+		event_function_call(event, __perf_remove_from_context, (void *)flags);
 
 	/*
 	 * The above event_function_call() can NO-OP when it hits
@@ -4232,41 +4248,92 @@ static void perf_event_enable_on_exec(int ctxn)
 static void perf_remove_from_owner(struct perf_event *event);
 static void perf_event_exit_event(struct perf_event *child_event,
 				  struct perf_event_context *child_ctx,
-				  struct task_struct *child);
+				  struct task_struct *child,
+				  bool removed);
 
 /*
  * Removes all events from the current task that have been marked
  * remove-on-exec, and feeds their values back to parent events.
  */
-static void perf_event_remove_on_exec(void)
+static void perf_event_remove_on_exec(int ctxn)
 {
-	int ctxn;
+	struct perf_event_context *ctx, *clone_ctx = NULL;
+	struct perf_event *event, *next;
+	LIST_HEAD(free_list);
+	unsigned long flags;
+	bool modified = false;
 
-	for_each_task_context_nr(ctxn) {
-		struct perf_event_context *ctx;
-		struct perf_event *event, *next;
+	ctx = perf_pin_task_context(current, ctxn);
+	if (!ctx)
+		return;
 
-		ctx = perf_pin_task_context(current, ctxn);
-		if (!ctx)
+	mutex_lock(&ctx->mutex);
+
+	if (WARN_ON_ONCE(ctx->task != current))
+		goto unlock;
+
+	list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
+		if (!event->attr.remove_on_exec)
 			continue;
-		mutex_lock(&ctx->mutex);
 
-		list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
-			if (!event->attr.remove_on_exec)
-				continue;
+		if (!is_kernel_event(event))
+			perf_remove_from_owner(event);
 
-			if (!is_kernel_event(event))
-				perf_remove_from_owner(event);
-			perf_remove_from_context(event, DETACH_GROUP);
-			/*
-			 * Remove the event and feed back its values to the
-			 * parent event.
-			 */
-			perf_event_exit_event(event, ctx, current);
-		}
-		mutex_unlock(&ctx->mutex);
-		put_ctx(ctx);
+		modified = true;
+
+		perf_remove_from_context(event, !!event->parent * DETACH_GROUP);
+		perf_event_exit_event(event, ctx, current, true);
+	}
+
+	raw_spin_lock_irqsave(&ctx->lock, flags);
+	if (modified)
+		clone_ctx = unclone_ctx(ctx);
+	--ctx->pin_count;
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+#if 0
+	struct perf_cpu_context *cpuctx;
+
+	if (!modified) {
+		perf_unpin_context(ctx);
+		goto unlock;
+	}
+
+	local_irq_save(flags);
+	cpuctx = __get_cpu_context(ctx);
+	perf_ctx_lock(cpuctx, ctx);
+	task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
+
+	list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
+		if (!event->attr.remove_on_exec)
+			continue;
+
+		if (event->parent)
+			perf_group_detach(event);
+		list_del_event(event, ctx);
+
+		list_add(&event->active_list, &free_list);
+	}
+
+	ctx_resched(cpuctx, ctx, EVENT_ALL);
+
+	clone_ctx = unclone_ctx(ctx);
+	--ctx->pin_count;
+	perf_ctx_unlock(cpuctx, ctx);
+	local_irq_restore(flags);
+
+	list_for_each_entry_safe(event, next, &free_list, active_entry) {
+		list_del(&event->active_entry);
+		perf_event_exit_event(event, ctx, current, true);
 	}
+#endif
+
+unlock:
+	mutex_unlock(&ctx->mutex);
+
+	put_ctx(ctx);
+	if (clone_ctx)
+		put_ctx(clone_ctx);
 }
 
 struct perf_read_data {
@@ -7615,20 +7682,18 @@ void perf_event_exec(void)
 	struct perf_event_context *ctx;
 	int ctxn;
 
-	rcu_read_lock();
 	for_each_task_context_nr(ctxn) {
-		ctx = current->perf_event_ctxp[ctxn];
-		if (!ctx)
-			continue;
-
 		perf_event_enable_on_exec(ctxn);
+		perf_event_remove_on_exec(ctxn);
 
-		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
-				 true);
+		rcu_read_lock();
+		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+		if (ctx) {
+			perf_iterate_ctx(ctx, perf_event_addr_filters_exec,
+					 NULL, true);
+		}
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();
-
-	perf_event_remove_on_exec();
 }
 
 struct remote_output {
@@ -12509,7 +12574,7 @@ static void sync_child_event(struct perf_event *child_event,
 
 static void
 perf_event_exit_event(struct perf_event *child_event,
 		      struct perf_event_context *child_ctx,
-		      struct task_struct *child)
+		      struct task_struct *child, bool removed)
 {
 	struct perf_event *parent_event = child_event->parent;
 
@@ -12526,12 +12591,15 @@ perf_event_exit_event(struct perf_event *child_event,
 	 * and being thorough is better.
 	 */
 	raw_spin_lock_irq(&child_ctx->lock);
-	WARN_ON_ONCE(child_ctx->is_active);
+	if (!removed) {
+		WARN_ON_ONCE(child_ctx->is_active);
 
-	if (parent_event)
-		perf_group_detach(child_event);
-	list_del_event(child_event, child_ctx);
-	perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
+		if (parent_event)
+			perf_group_detach(child_event);
+		list_del_event(child_event, child_ctx);
+	}
+	if (child_event->state >= PERF_EVENT_STATE_EXIT)
+		perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
 	raw_spin_unlock_irq(&child_ctx->lock);
 
 	/*
@@ -12617,7 +12685,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	perf_event_task(child, child_ctx, 0);
 
 	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
-		perf_event_exit_event(child_event, child_ctx, child);
+		perf_event_exit_event(child_event, child_ctx, child, false);
 
 	mutex_unlock(&child_ctx->mutex);
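[Editorial note: a side remark on the `!!event->parent * DETACH_GROUP` expression above — it is just a branchless spelling of the `parent ? DETACH_GROUP : 0` conditional suggested earlier in the thread. A standalone illustration; the DETACH_GROUP value here mirrors the kernel's 0x01UL define of this era, but the point is the idiom, not the constant:]

#include <assert.h>

#define DETACH_GROUP 0x01UL

static unsigned long detach_flags(const void *parent)
{
	/* !!parent normalizes the pointer to 0 or 1, so this equals
	 * (parent ? DETACH_GROUP : 0), written without a branch. */
	return !!parent * DETACH_GROUP;
}

int main(void)
{
	int x;

	assert(detach_flags(&x) == DETACH_GROUP);
	assert(detach_flags((void *)0) == 0);
	return 0;
}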
On Mon, Mar 22, 2021 at 6:24 AM Marco Elver <elver@google.com> wrote:
>
> On Wed, Mar 10, 2021 at 11:41AM +0100, Marco Elver wrote:
> > Add kselftest to test that remove_on_exec removes inherited events from
> > child tasks.
> >
> > Signed-off-by: Marco Elver <elver@google.com>
>
> To make compatible with more recent libc, we'll need to fixup the tests
> with the below.
>
> Also, I've seen that tools/perf/tests exists, however it seems to be
> primarily about perf-tool related tests. Is this correct?
>
> I'd propose to keep these purely kernel ABI related tests separate, and
> that way we can also make use of the kselftests framework which will
> also integrate into various CI systems such as kernelci.org.

Perhaps there is a way to have both? Having the perf tool spot an
errant kernel feels like a feature. There are also tools/lib/perf/tests
and Vince Weaver's tests [1]. It is possible to run standalone tests
from within perf test by having them be executed by a shell test.

Thanks,
Ian

[1] https://github.com/deater/perf_event_tests

> Thanks,
> -- Marco
>
> ------ >8 ------
>
> diff --git a/tools/testing/selftests/perf_events/remove_on_exec.c b/tools/testing/selftests/perf_events/remove_on_exec.c
> index e176b3a74d55..f89d0cfdb81e 100644
> --- a/tools/testing/selftests/perf_events/remove_on_exec.c
> +++ b/tools/testing/selftests/perf_events/remove_on_exec.c
> @@ -13,6 +13,11 @@
>  #define __have_siginfo_t 1
>  #define __have_sigval_t 1
>  #define __have_sigevent_t 1
> +#define __siginfo_t_defined
> +#define __sigval_t_defined
> +#define __sigevent_t_defined
> +#define _BITS_SIGINFO_CONSTS_H 1
> +#define _BITS_SIGEVENT_CONSTS_H 1
>
>  #include <linux/perf_event.h>
>  #include <pthread.h>
> diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
> index 7ebb9bb34c2e..b9a7d4b64b3c 100644
> --- a/tools/testing/selftests/perf_events/sigtrap_threads.c
> +++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
> @@ -13,6 +13,11 @@
>  #define __have_siginfo_t 1
>  #define __have_sigval_t 1
>  #define __have_sigevent_t 1
> +#define __siginfo_t_defined
> +#define __sigval_t_defined
> +#define __sigevent_t_defined
> +#define _BITS_SIGINFO_CONSTS_H 1
> +#define _BITS_SIGEVENT_CONSTS_H 1
>
>  #include <linux/hw_breakpoint.h>
>  #include <linux/perf_event.h>
On Tue, 23 Mar 2021 at 04:10, Ian Rogers <irogers@google.com> wrote:
> On Mon, Mar 22, 2021 at 6:24 AM Marco Elver <elver@google.com> wrote:
> > On Wed, Mar 10, 2021 at 11:41AM +0100, Marco Elver wrote:
> > > Add kselftest to test that remove_on_exec removes inherited events from
> > > child tasks.
> > >
> > > Signed-off-by: Marco Elver <elver@google.com>
> >
> > To make compatible with more recent libc, we'll need to fixup the tests
> > with the below.
> >
> > Also, I've seen that tools/perf/tests exists, however it seems to be
> > primarily about perf-tool related tests. Is this correct?
> >
> > I'd propose to keep these purely kernel ABI related tests separate, and
> > that way we can also make use of the kselftests framework which will
> > also integrate into various CI systems such as kernelci.org.
>
> Perhaps there is a way to have both? Having the perf tool spot an
> errant kernel feels like a feature. There are also tools/lib/perf/tests
> and Vince Weaver's tests [1]. It is possible to run standalone tests
> from within perf test by having them be executed by a shell test.

Thanks for the pointers. Sure, I'd support more additional tests. But I
had another look and it seems the tests in tools/{perf,lib/perf}/tests
do focus on perf-tool or the library respectively, so adding kernel ABI
tests there feels wrong. (If perf-tool somehow finds use for sigtrap, or
remove_on_exec, then having a perf-tool specific test for those would
make sense again.)

The tests at [1] do seem relevant, and its test strategy seems more
extensive, including testing older kernels. Unfortunately it is
out-of-tree, but that's probably because it was started before kselftest
came into existence. But there are probably things that [1] contains
that are not appropriate in-tree. It's all a bit confusing. Going
forward, if you insist on tests being also added to [1], we can perhaps
mirror some of the kselftest tests there.

There's also a logistical problem with the tests added here, because the
tests require an up-to-date siginfo_t, and they use the kernel's
<asm/siginfo.h> with some trickery. Until libc's siginfo_t is updated,
it probably doesn't make sense to add these tests to [1].

The other question is, would it be possible to also copy some of the
tests in [1] and convert to kselftest, so that they live in-tree and are
tested regularly (CI, ...)? Because I'd much prefer in-tree tests with
little boilerplate, that are structured with parsable output; in the
kernel we have the kselftest framework for tests with a user space
component, and KUnit for pure in-kernel tests.

Thanks,
-- Marco

> Thanks,
> Ian
>
> [1] https://github.com/deater/perf_event_tests

[...]
On Mon, Mar 22, 2021 at 05:42PM +0100, Peter Zijlstra wrote:
> On Mon, Mar 22, 2021 at 02:24:40PM +0100, Marco Elver wrote:
> > To make compatible with more recent libc, we'll need to fixup the tests
> > with the below.
>
> OK, that reproduced things here, thanks!
>
> The below seems to not explode instantly... it still has the
> alternative version in as well (and I think it might even work too, but
> the one I left in seems simpler).

Thanks! Unfortunately neither version worked if I tortured it a little
with this:

	for x in {1..1000}; do ( tools/testing/selftests/perf_events/remove_on_exec & ); done

Which resulted in the 2 warnings:

	WARNING: CPU: 1 PID: 795 at kernel/events/core.c:242 event_function+0xf3/0x100
	WARNING: CPU: 1 PID: 795 at kernel/events/core.c:247 event_function+0xef/0x100

with efs->func==__perf_event_enable. I believe it's sufficient to add

	mutex_lock(&parent_event->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent_event->child_mutex);

right before removing from context. With the version I have now (below
for completeness), extended torture with the above test results in no
more warnings and the test also passes.

I'd be happy to send a non-RFC v3 with all that squashed in. I'd need
your Signed-off-by for the diff you sent to proceed (and add your
Co-developed-by).

Thanks,
-- Marco

------ >8 ------

diff --git a/kernel/events/core.c b/kernel/events/core.c
index aa47e111435e..cea7c88fe131 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2165,8 +2165,9 @@ static void perf_group_detach(struct perf_event *event)
 	 * If this is a sibling, remove it from its group.
 	 */
 	if (leader != event) {
+		leader->nr_siblings--;
 		list_del_init(&event->sibling_list);
-		event->group_leader->nr_siblings--;
+		event->group_leader = event;
 		goto out;
 	}
 
@@ -2180,8 +2181,9 @@ static void perf_group_detach(struct perf_event *event)
 		if (sibling->event_caps & PERF_EV_CAP_SIBLING)
 			perf_remove_sibling_event(sibling);
 
-		sibling->group_leader = sibling;
+		leader->nr_siblings--;
 		list_del_init(&sibling->sibling_list);
+		sibling->group_leader = sibling;
 
 		/* Inherit group flags from the previous leader */
 		sibling->group_caps = event->group_caps;
 
@@ -2358,10 +2360,19 @@ __perf_remove_from_context(struct perf_event *event,
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
 	struct perf_event_context *ctx = event->ctx;
+	bool remove;
 
 	lockdep_assert_held(&ctx->mutex);
 
-	event_function_call(event, __perf_remove_from_context, (void *)flags);
+	/*
+	 * There is concurrency vs remove_on_exec().
+	 */
+	raw_spin_lock_irq(&ctx->lock);
+	remove = (event->attach_state & PERF_ATTACH_CONTEXT);
+	raw_spin_unlock_irq(&ctx->lock);
+
+	if (remove)
+		event_function_call(event, __perf_remove_from_context, (void *)flags);
 
 	/*
 	 * The above event_function_call() can NO-OP when it hits
@@ -4198,41 +4209,68 @@ static void perf_event_enable_on_exec(int ctxn)
 static void perf_remove_from_owner(struct perf_event *event);
 static void perf_event_exit_event(struct perf_event *child_event,
 				  struct perf_event_context *child_ctx,
-				  struct task_struct *child);
+				  struct task_struct *child,
+				  bool removed);
 
 /*
  * Removes all events from the current task that have been marked
  * remove-on-exec, and feeds their values back to parent events.
  */
-static void perf_event_remove_on_exec(void)
+static void perf_event_remove_on_exec(int ctxn)
 {
-	int ctxn;
+	struct perf_event_context *ctx, *clone_ctx = NULL;
+	struct perf_event *event, *next;
+	LIST_HEAD(free_list);
+	unsigned long flags;
+	bool modified = false;
 
-	for_each_task_context_nr(ctxn) {
-		struct perf_event_context *ctx;
-		struct perf_event *event, *next;
+	ctx = perf_pin_task_context(current, ctxn);
+	if (!ctx)
+		return;
 
-		ctx = perf_pin_task_context(current, ctxn);
-		if (!ctx)
+	mutex_lock(&ctx->mutex);
+
+	if (WARN_ON_ONCE(ctx->task != current))
+		goto unlock;
+
+	list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
+		struct perf_event *parent_event = event->parent;
+
+		if (!event->attr.remove_on_exec)
 			continue;
-		mutex_lock(&ctx->mutex);
 
-		list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
-			if (!event->attr.remove_on_exec)
-				continue;
+		if (!is_kernel_event(event))
+			perf_remove_from_owner(event);
 
-			if (!is_kernel_event(event))
-				perf_remove_from_owner(event);
-			perf_remove_from_context(event, DETACH_GROUP);
+		modified = true;
+
+		if (parent_event) {
 			/*
-			 * Remove the event and feed back its values to the
-			 * parent event.
+			 * Remove event from parent, to avoid race where the
+			 * parent concurrently iterates through its children to
+			 * enable, disable, or otherwise modify an event.
 			 */
-			perf_event_exit_event(event, ctx, current);
+			mutex_lock(&parent_event->child_mutex);
+			list_del_init(&event->child_list);
+			mutex_unlock(&parent_event->child_mutex);
 		}
-		mutex_unlock(&ctx->mutex);
-		put_ctx(ctx);
+
+		perf_remove_from_context(event, !!event->parent * DETACH_GROUP);
+		perf_event_exit_event(event, ctx, current, true);
 	}
+
+	raw_spin_lock_irqsave(&ctx->lock, flags);
+	if (modified)
+		clone_ctx = unclone_ctx(ctx);
+	--ctx->pin_count;
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+unlock:
+	mutex_unlock(&ctx->mutex);
+
+	put_ctx(ctx);
+	if (clone_ctx)
+		put_ctx(clone_ctx);
 }
 
 struct perf_read_data {
@@ -7581,20 +7619,18 @@ void perf_event_exec(void)
 	struct perf_event_context *ctx;
 	int ctxn;
 
-	rcu_read_lock();
 	for_each_task_context_nr(ctxn) {
-		ctx = current->perf_event_ctxp[ctxn];
-		if (!ctx)
-			continue;
-
 		perf_event_enable_on_exec(ctxn);
+		perf_event_remove_on_exec(ctxn);
 
-		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
-				 true);
+		rcu_read_lock();
+		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+		if (ctx) {
+			perf_iterate_ctx(ctx, perf_event_addr_filters_exec,
+					 NULL, true);
+		}
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();
-
-	perf_event_remove_on_exec();
 }
 
 struct remote_output {
@@ -12472,7 +12508,7 @@ static void sync_child_event(struct perf_event *child_event,
 
 static void
 perf_event_exit_event(struct perf_event *child_event,
 		      struct perf_event_context *child_ctx,
-		      struct task_struct *child)
+		      struct task_struct *child, bool removed)
 {
 	struct perf_event *parent_event = child_event->parent;
 
@@ -12489,12 +12525,15 @@ perf_event_exit_event(struct perf_event *child_event,
 	 * and being thorough is better.
 	 */
 	raw_spin_lock_irq(&child_ctx->lock);
-	WARN_ON_ONCE(child_ctx->is_active);
+	if (!removed) {
+		WARN_ON_ONCE(child_ctx->is_active);
 
-	if (parent_event)
-		perf_group_detach(child_event);
-	list_del_event(child_event, child_ctx);
-	perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
+		if (parent_event)
+			perf_group_detach(child_event);
+		list_del_event(child_event, child_ctx);
+	}
+	if (child_event->state >= PERF_EVENT_STATE_EXIT)
+		perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
 	raw_spin_unlock_irq(&child_ctx->lock);
 
 	/*
@@ -12580,7 +12619,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	perf_event_task(child, child_ctx, 0);
 
 	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
-		perf_event_exit_event(child_event, child_ctx, child);
+		perf_event_exit_event(child_event, child_ctx, child, false);
 
 	mutex_unlock(&child_ctx->mutex);
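[Editorial note: for context on the race the child_mutex dance closes — the parent side reaches inherited children via perf_event_for_each_child(), which is what e.g. PERF_EVENT_IOC_ENABLE on the parent's fd ends up calling. Roughly, in kernel/events/core.c of this era (shown for orientation; not part of the patch):]

static void perf_event_for_each_child(struct perf_event *event,
				      void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);

	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}

[Unlinking the exec'ing task's event from event->child_list under child_mutex before perf_remove_from_context() therefore ensures a concurrent ioctl either sees the child while its context is still valid, or not at all.]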
On Tue, Mar 23, 2021 at 10:52:41AM +0100, Marco Elver wrote:
> with efs->func==__perf_event_enable. I believe it's sufficient to add
>
> 	mutex_lock(&parent_event->child_mutex);
> 	list_del_init(&event->child_list);
> 	mutex_unlock(&parent_event->child_mutex);
>
> right before removing from context. With the version I have now (below
> for completeness), extended torture with the above test results in no
> more warnings and the test also passes.

> +	list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
> +		struct perf_event *parent_event = event->parent;
> +
> +		if (!event->attr.remove_on_exec)
>  			continue;
>
> +		if (!is_kernel_event(event))
> +			perf_remove_from_owner(event);
>
> +		modified = true;
> +
> +		if (parent_event) {
>  			/*
> +			 * Remove event from parent, to avoid race where the
> +			 * parent concurrently iterates through its children to
> +			 * enable, disable, or otherwise modify an event.
>  			 */
> +			mutex_lock(&parent_event->child_mutex);
> +			list_del_init(&event->child_list);
> +			mutex_unlock(&parent_event->child_mutex);
>  		}

^^^ this, right?

But that's something perf_event_exit_event() already does. So then you're
worried about the order of things.

> +
> +		perf_remove_from_context(event, !!event->parent * DETACH_GROUP);
> +		perf_event_exit_event(event, ctx, current, true);
>  	}

perf_event_release_kernel() first does perf_remove_from_context() and
then clears the child_list, and that makes sense because if we're there,
there's no external access anymore, the filedesc is gone and nobody will
be iterating the child_list anymore.

perf_event_exit_task_context() and perf_event_exit_event() OTOH seem to
rely on ctx->task == TOMBSTONE to sabotage event_function_call() such
that if anybody is iterating the child_list, it'll NOP out.

But here we have neither, and thus need to worry about the order vs
child_list iteration.

I suppose we should stick sync_child_event() in there as well.

And at that point there's very little value in still using
perf_event_exit_event()... let me see if there's something to be done
about that.
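[Editorial note: the TOMBSTONE sabotage referred to above, excerpted and heavily trimmed from event_function_call() in kernel/events/core.c of this era — check the tree for the full version:]

static void
event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	/* ... */

	if (task == TASK_TOMBSTONE)
		return;		/* context is being torn down: NOP out */

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;		/* raced with teardown: NOP out */
	}
	/* ... */
}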
On Tue, 23 Mar 2021 at 11:32, Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Tue, Mar 23, 2021 at 10:52:41AM +0100, Marco Elver wrote:
>
> > with efs->func==__perf_event_enable. I believe it's sufficient to add
> >
> > 	mutex_lock(&parent_event->child_mutex);
> > 	list_del_init(&event->child_list);
> > 	mutex_unlock(&parent_event->child_mutex);
> >
> > right before removing from context. With the version I have now (below
> > for completeness), extended torture with the above test results in no
> > more warnings and the test also passes.
>
> > +	list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
> > +		struct perf_event *parent_event = event->parent;
> > +
> > +		if (!event->attr.remove_on_exec)
> >  			continue;
> >
> > +		if (!is_kernel_event(event))
> > +			perf_remove_from_owner(event);
> >
> > +		modified = true;
> > +
> > +		if (parent_event) {
> >  			/*
> > +			 * Remove event from parent, to avoid race where the
> > +			 * parent concurrently iterates through its children to
> > +			 * enable, disable, or otherwise modify an event.
> >  			 */
> > +			mutex_lock(&parent_event->child_mutex);
> > +			list_del_init(&event->child_list);
> > +			mutex_unlock(&parent_event->child_mutex);
> >  		}
>
> ^^^ this, right?
>
> But that's something perf_event_exit_event() already does. So then you're
> worried about the order of things.

Correct. We somehow need to prohibit the parent from doing an
event_function_call() while we potentially deactivate the context with
perf_remove_from_context().

> > +
> > +		perf_remove_from_context(event, !!event->parent * DETACH_GROUP);
> > +		perf_event_exit_event(event, ctx, current, true);
> >  	}
>
> perf_event_release_kernel() first does perf_remove_from_context() and
> then clears the child_list, and that makes sense because if we're there,
> there's no external access anymore, the filedesc is gone and nobody will
> be iterating the child_list anymore.
>
> perf_event_exit_task_context() and perf_event_exit_event() OTOH seem to
> rely on ctx->task == TOMBSTONE to sabotage event_function_call() such
> that if anybody is iterating the child_list, it'll NOP out.
>
> But here we have neither, and thus need to worry about the order vs
> child_list iteration.
>
> I suppose we should stick sync_child_event() in there as well.
>
> And at that point there's very little value in still using
> perf_event_exit_event()... let me see if there's something to be done
> about that.

I don't mind dropping use of perf_event_exit_event() and open coding
all of this. That would also avoid modifying perf_event_exit_event().

But I leave it to you what you think is nicest.

Thanks,
-- Marco
On Tue, Mar 23, 2021 at 11:41AM +0100, Marco Elver wrote: > On Tue, 23 Mar 2021 at 11:32, Peter Zijlstra <peterz@infradead.org> wrote: [...] > > > + if (parent_event) { > > > /* > > > + * Remove event from parent, to avoid race where the > > > + * parent concurrently iterates through its children to > > > + * enable, disable, or otherwise modify an event. > > > */ > > > + mutex_lock(&parent_event->child_mutex); > > > + list_del_init(&event->child_list); > > > + mutex_unlock(&parent_event->child_mutex); > > > } > > > > ^^^ this, right? > > > > But that's something perf_event_exit_event() alread does. So then you're > > worried about the order of things. > > Correct. We somehow need to prohibit the parent from doing an > event_function_call() while we potentially deactivate the context with > perf_remove_from_context(). > > > > + > > > + perf_remove_from_context(event, !!event->parent * DETACH_GROUP); > > > + perf_event_exit_event(event, ctx, current, true); > > > } > > > > perf_event_release_kernel() first does perf_remove_from_context() and > > then clears the child_list, and that makes sense because if we're there, > > there's no external access anymore, the filedesc is gone and nobody will > > be iterating child_list anymore. > > > > perf_event_exit_task_context() and perf_event_exit_event() OTOH seem to > > rely on ctx->task == TOMBSTONE to sabotage event_function_call() such > > that if anybody is iterating the child_list, it'll NOP out. > > > > But here we don't have neither, and thus need to worry about the order > > vs child_list iteration. > > > > I suppose we should stick sync_child_event() in there as well. > > > > And at that point there's very little value in still using > > perf_event_exit_event()... let me see if there's something to be done > > about that. > > I don't mind dropping use of perf_event_exit_event() and open coding > all of this. That would also avoid modifying perf_event_exit_event(). > > But I leave it to you what you think is nicest. I played a bit more with it, and the below would be the version without using perf_event_exit_event(). Perhaps it isn't too bad. Thanks, -- Marco ------ >8 ------ diff --git a/kernel/events/core.c b/kernel/events/core.c index aa47e111435e..288b61820dab 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2165,8 +2165,9 @@ static void perf_group_detach(struct perf_event *event) * If this is a sibling, remove it from its group. */ if (leader != event) { + leader->nr_siblings--; list_del_init(&event->sibling_list); - event->group_leader->nr_siblings--; + event->group_leader = event; goto out; } @@ -2180,8 +2181,9 @@ static void perf_group_detach(struct perf_event *event) if (sibling->event_caps & PERF_EV_CAP_SIBLING) perf_remove_sibling_event(sibling); - sibling->group_leader = sibling; + leader->nr_siblings--; list_del_init(&sibling->sibling_list); + sibling->group_leader = sibling; /* Inherit group flags from the previous leader */ sibling->group_caps = event->group_caps; @@ -2358,10 +2360,19 @@ __perf_remove_from_context(struct perf_event *event, static void perf_remove_from_context(struct perf_event *event, unsigned long flags) { struct perf_event_context *ctx = event->ctx; + bool remove; lockdep_assert_held(&ctx->mutex); - event_function_call(event, __perf_remove_from_context, (void *)flags); + /* + * There is concurrency vs remove_on_exec(). 
+ */ + raw_spin_lock_irq(&ctx->lock); + remove = (event->attach_state & PERF_ATTACH_CONTEXT); + raw_spin_unlock_irq(&ctx->lock); + + if (remove) + event_function_call(event, __perf_remove_from_context, (void *)flags); /* * The above event_function_call() can NO-OP when it hits @@ -4196,43 +4207,86 @@ static void perf_event_enable_on_exec(int ctxn) } static void perf_remove_from_owner(struct perf_event *event); -static void perf_event_exit_event(struct perf_event *child_event, - struct perf_event_context *child_ctx, - struct task_struct *child); +static void sync_child_event(struct perf_event *child_event, + struct task_struct *child); +static void free_event(struct perf_event *event); /* * Removes all events from the current task that have been marked * remove-on-exec, and feeds their values back to parent events. */ -static void perf_event_remove_on_exec(void) +static void perf_event_remove_on_exec(int ctxn) { - int ctxn; + struct perf_event_context *ctx, *clone_ctx = NULL; + struct perf_event *event, *next; + LIST_HEAD(free_list); + unsigned long flags; + bool modified = false; - for_each_task_context_nr(ctxn) { - struct perf_event_context *ctx; - struct perf_event *event, *next; + ctx = perf_pin_task_context(current, ctxn); + if (!ctx) + return; - ctx = perf_pin_task_context(current, ctxn); - if (!ctx) + mutex_lock(&ctx->mutex); + + if (WARN_ON_ONCE(ctx->task != current)) + goto unlock; + + list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { + struct perf_event *parent_event = event->parent; + + if (!event->attr.remove_on_exec) continue; - mutex_lock(&ctx->mutex); - list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { - if (!event->attr.remove_on_exec) - continue; + if (!is_kernel_event(event)) + perf_remove_from_owner(event); + + modified = true; - if (!is_kernel_event(event)) - perf_remove_from_owner(event); - perf_remove_from_context(event, DETACH_GROUP); + if (parent_event) { /* - * Remove the event and feed back its values to the - * parent event. + * Remove event from parent *before* modifying contexts, + * to avoid race where the parent concurrently iterates + * through its children to enable, disable, or otherwise + * modify an event. 
*/ - perf_event_exit_event(event, ctx, current); + + sync_child_event(event, current); + + WARN_ON_ONCE(parent_event->ctx->parent_ctx); + mutex_lock(&parent_event->child_mutex); + list_del_init(&event->child_list); + mutex_unlock(&parent_event->child_mutex); + + perf_event_wakeup(parent_event); + put_event(parent_event); } - mutex_unlock(&ctx->mutex); - put_ctx(ctx); + + perf_remove_from_context(event, !!event->parent * DETACH_GROUP); + + raw_spin_lock_irq(&ctx->lock); + WARN_ON_ONCE(ctx->is_active); + perf_event_set_state(event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */ + raw_spin_unlock_irq(&ctx->lock); + + if (parent_event) + free_event(event); + else + perf_event_wakeup(event); } + + raw_spin_lock_irqsave(&ctx->lock, flags); + if (modified) + clone_ctx = unclone_ctx(ctx); + --ctx->pin_count; + raw_spin_unlock_irqrestore(&ctx->lock, flags); + +unlock: + mutex_unlock(&ctx->mutex); + + put_ctx(ctx); + if (clone_ctx) + put_ctx(clone_ctx); } struct perf_read_data { @@ -7581,20 +7635,18 @@ void perf_event_exec(void) struct perf_event_context *ctx; int ctxn; - rcu_read_lock(); for_each_task_context_nr(ctxn) { - ctx = current->perf_event_ctxp[ctxn]; - if (!ctx) - continue; - perf_event_enable_on_exec(ctxn); + perf_event_remove_on_exec(ctxn); - perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, - true); + rcu_read_lock(); + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) { + perf_iterate_ctx(ctx, perf_event_addr_filters_exec, + NULL, true); + } + rcu_read_unlock(); } - rcu_read_unlock(); - - perf_event_remove_on_exec(); } struct remote_output {
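One expression in the version above that is easy to misread: perf_remove_from_context(event, !!event->parent * DETACH_GROUP) is a branch-free way of passing DETACH_GROUP only for inherited (child) events. The original event keeps its grouping, because its file descriptor may still be in use; only inherited groups are destroyed wholesale. The equivalent explicit form (illustration only, not part of the patch):

	unsigned long detach_flags = 0;

	if (event->parent)		/* inherited child: tear down the inherited group */
		detach_flags |= DETACH_GROUP;
	/* original event: keep the grouping; the fd owner may still operate on it */

	perf_remove_from_context(event, detach_flags);

This mirrors the "do not destroy the 'original' grouping" rule documented in perf_event_exit_event().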
On Tue, Mar 23, 2021 at 11:32:03AM +0100, Peter Zijlstra wrote: > And at that point there's very little value in still using > perf_event_exit_event()... let me see if there's something to be done > about that. I ended up with something like the below. Which then simplifies remove_on_exec() to: static void perf_event_remove_on_exec(int ctxn) { struct perf_event_context *ctx, *clone_ctx = NULL; struct perf_event *event, *next; bool modified = false; unsigned long flags; ctx = perf_pin_task_context(current, ctxn); if (!ctx) return; mutex_lock(&ctx->mutex); if (WARN_ON_ONCE(ctx->task != current)) goto unlock; list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { if (!event->attr.remove_on_exec) continue; if (!is_kernel_event(event)) perf_remove_from_owner(event); modified = true; perf_event_exit_event(event, ctx); } raw_spin_lock_irqsave(&ctx->lock, flags); if (modified) clone_ctx = unclone_ctx(ctx); --ctx->pin_count; raw_spin_unlock_irqrestore(&ctx->lock, flags); unlock: mutex_unlock(&ctx->mutex); put_ctx(ctx); if (clone_ctx) put_ctx(clone_ctx); } Very lightly tested with that {1..1000} thing. --- Subject: perf: Rework perf_event_exit_event() From: Peter Zijlstra <peterz@infradead.org> Date: Tue Mar 23 15:16:06 CET 2021 Make perf_event_exit_event() more robust, such that we can use it from other contexts. Specifically the up and coming remove_on_exec. For this to work we need to address a few issues. Remove_on_exec will not destroy the entire context, so we cannot rely on TASK_TOMBSTONE to disable event_function_call() and we thus have to use perf_remove_from_context(). When using perf_remove_from_context(), there's two races to consider. The first is against close(), where we can have concurrent tear-down of the event. The second is against child_list iteration, which should not find a half baked event. To address this, teach perf_remove_from_context() to special case !ctx->is_active and about DETACH_CHILD. 
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> --- include/linux/perf_event.h | 1 kernel/events/core.c | 144 +++++++++++++++++++++++++-------------------- 2 files changed, 81 insertions(+), 64 deletions(-) --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -607,6 +607,7 @@ struct swevent_hlist { #define PERF_ATTACH_TASK_DATA 0x08 #define PERF_ATTACH_ITRACE 0x10 #define PERF_ATTACH_SCHED_CB 0x20 +#define PERF_ATTACH_CHILD 0x40 struct perf_cgroup; struct perf_buffer; --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2210,6 +2210,26 @@ static void perf_group_detach(struct per perf_event__header_size(leader); } +static void sync_child_event(struct perf_event *child_event); + +static void perf_child_detach(struct perf_event *event) +{ + struct perf_event *parent_event = event->parent; + + if (!(event->attach_state & PERF_ATTACH_CHILD)) + return; + + event->attach_state &= ~PERF_ATTACH_CHILD; + + if (WARN_ON_ONCE(!parent_event)) + return; + + lockdep_assert_held(&parent_event->child_mutex); + + sync_child_event(event); + list_del_init(&event->child_list); +} + static bool is_orphaned_event(struct perf_event *event) { return event->state == PERF_EVENT_STATE_DEAD; @@ -2317,6 +2337,7 @@ group_sched_out(struct perf_event *group } #define DETACH_GROUP 0x01UL +#define DETACH_CHILD 0x02UL /* * Cross CPU call to remove a performance event @@ -2340,6 +2361,8 @@ __perf_remove_from_context(struct perf_e event_sched_out(event, cpuctx, ctx); if (flags & DETACH_GROUP) perf_group_detach(event); + if (flags & DETACH_CHILD) + perf_child_detach(event); list_del_event(event, ctx); if (!ctx->nr_events && ctx->is_active) { @@ -2368,25 +2391,21 @@ static void perf_remove_from_context(str lockdep_assert_held(&ctx->mutex); - event_function_call(event, __perf_remove_from_context, (void *)flags); - /* - * The above event_function_call() can NO-OP when it hits - * TASK_TOMBSTONE. In that case we must already have been detached - * from the context (by perf_event_exit_event()) but the grouping - * might still be in-tact. - */ - WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); - if ((flags & DETACH_GROUP) && - (event->attach_state & PERF_ATTACH_GROUP)) { - /* - * Since in that case we cannot possibly be scheduled, simply - * detach now. - */ - raw_spin_lock_irq(&ctx->lock); - perf_group_detach(event); + * Because of perf_event_exit_task(), perf_remove_from_context() ought + * to work in the face of TASK_TOMBSTONE, unlike every other + * event_function_call() user. 
+ */ + raw_spin_lock_irq(&ctx->lock); + if (!ctx->is_active) { + __perf_remove_from_context(event, __get_cpu_context(ctx), + ctx, (void *)flags); raw_spin_unlock_irq(&ctx->lock); + return; } + raw_spin_unlock_irq(&ctx->lock); + + event_function_call(event, __perf_remove_from_context, (void *)flags); } /* @@ -12379,14 +12398,17 @@ void perf_pmu_migrate_context(struct pmu } EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); -static void sync_child_event(struct perf_event *child_event, - struct task_struct *child) +static void sync_child_event(struct perf_event *child_event) { struct perf_event *parent_event = child_event->parent; u64 child_val; - if (child_event->attr.inherit_stat) - perf_event_read_event(child_event, child); + if (child_event->attr.inherit_stat) { + struct task_struct *task = child_event->ctx->task; + + if (task) + perf_event_read_event(child_event, task); + } child_val = perf_event_count(child_event); @@ -12401,60 +12423,53 @@ static void sync_child_event(struct perf } static void -perf_event_exit_event(struct perf_event *child_event, - struct perf_event_context *child_ctx, - struct task_struct *child) +perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) { - struct perf_event *parent_event = child_event->parent; + struct perf_event *parent_event = event->parent; + unsigned long detach_flags = 0; - /* - * Do not destroy the 'original' grouping; because of the context - * switch optimization the original events could've ended up in a - * random child task. - * - * If we were to destroy the original group, all group related - * operations would cease to function properly after this random - * child dies. - * - * Do destroy all inherited groups, we don't care about those - * and being thorough is better. - */ - raw_spin_lock_irq(&child_ctx->lock); - WARN_ON_ONCE(child_ctx->is_active); + if (parent_event) { + /* + * Do not destroy the 'original' grouping; because of the + * context switch optimization the original events could've + * ended up in a random child task. + * + * If we were to destroy the original group, all group related + * operations would cease to function properly after this + * random child dies. + * + * Do destroy all inherited groups, we don't care about those + * and being thorough is better. + */ + detach_flags = DETACH_GROUP | DETACH_CHILD; + mutex_lock(&parent_event->child_mutex); + } - if (parent_event) - perf_group_detach(child_event); - list_del_event(child_event, child_ctx); - perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */ - raw_spin_unlock_irq(&child_ctx->lock); + perf_remove_from_context(event, detach_flags); + + raw_spin_lock_irq(&ctx->lock); + if (event->state > PERF_EVENT_STATE_EXIT) + perf_event_set_state(event, PERF_EVENT_STATE_EXIT); + raw_spin_unlock_irq(&ctx->lock); /* - * Parent events are governed by their filedesc, retain them. + * Child events can be freed. */ - if (!parent_event) { - perf_event_wakeup(child_event); + if (parent_event) { + mutex_unlock(&parent_event->child_mutex); + /* + * Kick perf_poll() for is_event_hup(); + */ + perf_event_wakeup(parent_event); + free_event(event); + put_event(parent_event); return; } - /* - * Child events can be cleaned up. - */ - - sync_child_event(child_event, child); /* - * Remove this event from the parent's list - */ - WARN_ON_ONCE(parent_event->ctx->parent_ctx); - mutex_lock(&parent_event->child_mutex); - list_del_init(&child_event->child_list); - mutex_unlock(&parent_event->child_mutex); - - /* - * Kick perf_poll() for is_event_hup(). 
+ * Parent events are governed by their filedesc, retain them. */ - perf_event_wakeup(parent_event); - free_event(child_event); - put_event(parent_event); + perf_event_wakeup(event); } static void perf_event_exit_task_context(struct task_struct *child, int ctxn) @@ -12511,7 +12526,7 @@ static void perf_event_exit_task_context perf_event_task(child, child_ctx, 0); list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) - perf_event_exit_event(child_event, child_ctx, child); + perf_event_exit_event(child_event, child_ctx); mutex_unlock(&child_ctx->mutex); @@ -12771,6 +12786,7 @@ inherit_event(struct perf_event *parent_ */ raw_spin_lock_irqsave(&child_ctx->lock, flags); add_event_to_ctx(child_event, child_ctx); + child_event->attach_state |= PERF_ATTACH_CHILD; raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /*
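Condensing the patch above, the child-event teardown now effectively becomes the following sequence (a restatement for readability, not additional code; the patch also sets the event state to PERF_EVENT_STATE_EXIT under ctx->lock between the removal and the unlock):

	mutex_lock(&parent_event->child_mutex);
	/*
	 * DETACH_CHILD makes __perf_remove_from_context() invoke
	 * perf_child_detach(): sync_child_event() feeds the counts back to
	 * the parent and list_del_init() unlinks child_list, so concurrent
	 * child_list iterators never observe a half-torn-down event.
	 */
	perf_remove_from_context(event, DETACH_GROUP | DETACH_CHILD);
	mutex_unlock(&parent_event->child_mutex);

	perf_event_wakeup(parent_event);	/* kick perf_poll() for is_event_hup() */
	free_event(event);
	put_event(parent_event);

And because perf_remove_from_context() now short-circuits under ctx->lock when !ctx->is_active, this same sequence is safe both from the exit path (where ctx->task is TASK_TOMBSTONE) and from exec, where the context stays alive.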
On Tue, Mar 23, 2021 at 03:45PM +0100, Peter Zijlstra wrote: > On Tue, Mar 23, 2021 at 11:32:03AM +0100, Peter Zijlstra wrote: > > And at that point there's very little value in still using > > perf_event_exit_event()... let me see if there's something to be done > > about that. > > I ended up with something like the below. Which then simplifies > remove_on_exec() to: > [...] > > Very lightly tested with that {1..1000} thing. > > --- > > Subject: perf: Rework perf_event_exit_event() > From: Peter Zijlstra <peterz@infradead.org> > Date: Tue Mar 23 15:16:06 CET 2021 > > Make perf_event_exit_event() more robust, such that we can use it from > other contexts. Specifically the up and coming remove_on_exec. > > For this to work we need to address a few issues. Remove_on_exec will > not destroy the entire context, so we cannot rely on TASK_TOMBSTONE to > disable event_function_call() and we thus have to use > perf_remove_from_context(). > > When using perf_remove_from_context(), there's two races to consider. > The first is against close(), where we can have concurrent tear-down > of the event. The second is against child_list iteration, which should > not find a half baked event. > > To address this, teach perf_remove_from_context() to special case > !ctx->is_active and about DETACH_CHILD. > > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Very nice, thanks! It seems to all hold up to testing as well. Unless you already have this on some branch somewhere, I'll prepend it to the series for now. I'll test some more and try to get v3 out tomorrow. Thanks, -- Marco
On Tue, Mar 23, 2021 at 04:58:37PM +0100, Marco Elver wrote: > On Tue, Mar 23, 2021 at 03:45PM +0100, Peter Zijlstra wrote: > > On Tue, Mar 23, 2021 at 11:32:03AM +0100, Peter Zijlstra wrote: > > > And at that point there's very little value in still using > > > perf_event_exit_event()... let me see if there's something to be done > > > about that. > > > > I ended up with something like the below. Which then simplifies > > remove_on_exec() to: > > > [...] > > > > Very lightly tested with that {1..1000} thing. > > > > --- > > > > Subject: perf: Rework perf_event_exit_event() > > From: Peter Zijlstra <peterz@infradead.org> > > Date: Tue Mar 23 15:16:06 CET 2021 > > > > Make perf_event_exit_event() more robust, such that we can use it from > > other contexts. Specifically the up and coming remove_on_exec. > > > > For this to work we need to address a few issues. Remove_on_exec will > > not destroy the entire context, so we cannot rely on TASK_TOMBSTONE to > > disable event_function_call() and we thus have to use > > perf_remove_from_context(). > > > > When using perf_remove_from_context(), there's two races to consider. > > The first is against close(), where we can have concurrent tear-down > > of the event. The second is against child_list iteration, which should > > not find a half baked event. > > > > To address this, teach perf_remove_from_context() to special case > > !ctx->is_active and about DETACH_CHILD. > > > > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> > > Very nice, thanks! It seems to all hold up to testing as well. > > Unless you already have this on some branch somewhere, I'll prepend it > to the series for now. I'll test some more and try to get v3 out > tomorrow. I have not queued it, so please keep it in your series so it stays together (and tested). Thanks!
On Tue, Mar 23, 2021 at 10:47AM +0100, Marco Elver wrote: > On Tue, 23 Mar 2021 at 04:10, Ian Rogers <irogers@google.com> wrote: > > On Mon, Mar 22, 2021 at 6:24 AM Marco Elver <elver@google.com> wrote: > > > On Wed, Mar 10, 2021 at 11:41AM +0100, Marco Elver wrote: > > > > Add kselftest to test that remove_on_exec removes inherited events from > > > > child tasks. > > > > > > > > Signed-off-by: Marco Elver <elver@google.com> > > > > > > To make the tests compatible with more recent libc, we'll need to fix > > > them up with the below. > > > > > > Also, I've seen that tools/perf/tests exists; however, it seems to be > > > primarily about perf-tool related tests. Is this correct? > > > > > > I'd propose to keep these purely kernel ABI related tests separate, and > > > that way we can also make use of the kselftest framework which will > > > also integrate into various CI systems such as kernelci.org. > > > > Perhaps there is a way to have both? Having the perf tool spot an > > errant kernel feels like a feature. There are also > > tools/lib/perf/tests and Vince Weaver's tests [1]. It is possible to > > run standalone tests from within perf test by having them be executed > > by a shell test. > > Thanks for the pointers. Sure, I'd support additional tests. > > But I had another look and it seems the tests in > tools/{perf,lib/perf}/tests do focus on perf-tool or the library > respectively, so adding kernel ABI tests there feels wrong. (If > perf-tool somehow finds use for sigtrap, or remove_on_exec, then > having a perf-tool specific test for those would make sense again.) Ok, I checked once more, and I did find a few pure kernel ABI tests e.g. in "wp.c". [...] > Because I'd much prefer in-tree tests with little boilerplate, that > are structured with parsable output; in the kernel we have the > kselftest framework for tests with a user space component, and KUnit > for pure in-kernel tests. So let's try to have both... but from what I could tell, the remove_on_exec test just can't be turned into a perf tool built-in test, at least not easily. In perf tool I also can't use the new "si_perf" field yet. I'll add the patch below at the end of the series, so that we can have both. Too many tests probably don't hurt... Thanks, -- Marco ------ >8 ------ commit 6a98611ace59c867aa135f780b1879990180548e Author: Marco Elver <elver@google.com> Date: Tue Mar 23 19:51:12 2021 +0100 perf test: Add basic stress test for sigtrap handling Ports the stress test from tools/testing/selftests/sigtrap_threads.c, and adds it as a perf tool built-in test. This allows checking the basic sigtrap functionality from within the perf tool. 
Signed-off-by: Marco Elver <elver@google.com> diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index 650aec19d490..a429c7a02b37 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -64,6 +64,7 @@ perf-y += parse-metric.o perf-y += pe-file-parsing.o perf-y += expand-cgroup.o perf-y += perf-time-to-tsc.o +perf-y += sigtrap.o $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build $(call rule_mkdir) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index c4b888f18e9c..28a1cb5eaa77 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -359,6 +359,11 @@ static struct test generic_tests[] = { .func = test__perf_time_to_tsc, .is_supported = test__tsc_is_supported, }, + { + .desc = "Sigtrap support", + .func = test__sigtrap, + .is_supported = test__wp_is_supported, /* uses wp for test */ + }, { .func = NULL, }, diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c new file mode 100644 index 000000000000..0888a4e02222 --- /dev/null +++ b/tools/perf/tests/sigtrap.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Basic stress-test for sigtrap support. + * + * Copyright (C) 2021, Google LLC. + */ + +#include <pthread.h> +#include <signal.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/syscall.h> +#include <unistd.h> +#include <linux/hw_breakpoint.h> +#include <linux/kernel.h> +#include "tests.h" +#include "debug.h" +#include "event.h" +#include "cloexec.h" +#include "../perf-sys.h" + +#define NUM_THREADS 5 + +/* Data shared between test body, threads, and signal handler. */ +static struct { + int tids_want_signal; /* Which threads still want a signal. */ + int signal_count; /* Sanity check number of signals received. */ + volatile int iterate_on; /* Variable to set breakpoint on. */ + siginfo_t first_siginfo; /* First observed siginfo_t. */ +} ctx; + +static struct perf_event_attr make_event_attr(void) +{ + struct perf_event_attr attr = { + .type = PERF_TYPE_BREAKPOINT, + .size = sizeof(attr), + .sample_period = 1, + .disabled = 1, + .bp_addr = (long)&ctx.iterate_on, + .bp_type = HW_BREAKPOINT_RW, + .bp_len = HW_BREAKPOINT_LEN_1, + .inherit = 1, /* Children inherit events ... */ + .inherit_thread = 1, /* ... but only cloned with CLONE_THREAD. */ + .remove_on_exec = 1, /* Required by sigtrap. */ + .sigtrap = 1, /* Request synchronous SIGTRAP on event. 
*/ + }; + return attr; +} + +static void +sigtrap_handler(int signum __maybe_unused, siginfo_t *info, void *ucontext __maybe_unused) +{ + if (!__atomic_fetch_add(&ctx.signal_count, 1, __ATOMIC_RELAXED)) + ctx.first_siginfo = *info; + __atomic_fetch_sub(&ctx.tids_want_signal, syscall(SYS_gettid), __ATOMIC_RELAXED); +} + +static void *test_thread(void *arg) +{ + pthread_barrier_t *barrier = (pthread_barrier_t *)arg; + pid_t tid = syscall(SYS_gettid); + int i; + + pthread_barrier_wait(barrier); + + __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED); + for (i = 0; i < ctx.iterate_on - 1; i++) + __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED); + + return NULL; +} + +static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier) +{ + int i; + + pthread_barrier_wait(barrier); + for (i = 0; i < NUM_THREADS; i++) + TEST_ASSERT_EQUAL("pthread_join() failed", pthread_join(threads[i], NULL), 0); + + return 0; +} + +static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier) +{ + ctx.iterate_on = 3000; + + TEST_ASSERT_EQUAL("misfired signal?", ctx.signal_count, 0); + TEST_ASSERT_EQUAL("enable failed", ioctl(fd, PERF_EVENT_IOC_ENABLE, 0), 0); + if (run_test_threads(threads, barrier)) + return -1; + TEST_ASSERT_EQUAL("disable failed", ioctl(fd, PERF_EVENT_IOC_DISABLE, 0), 0); + + TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, NUM_THREADS * ctx.iterate_on); + TEST_ASSERT_EQUAL("missing signals or incorrectly delivered", ctx.tids_want_signal, 0); + TEST_ASSERT_VAL("unexpected si_addr", ctx.first_siginfo.si_addr == &ctx.iterate_on); + TEST_ASSERT_EQUAL("unexpected si_errno", ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT); +#if 0 /* FIXME: test build and enable when libc's signal.h has si_perf. */ + TEST_ASSERT_VAL("unexpected si_perf", ctx.first_siginfo.si_perf == + ((HW_BREAKPOINT_LEN_1 << 16) | HW_BREAKPOINT_RW)); +#endif + + return 0; +} + +int test__sigtrap(struct test *test __maybe_unused, int subtest __maybe_unused) +{ + struct perf_event_attr attr = make_event_attr(); + struct sigaction action = {}; + struct sigaction oldact; + pthread_t threads[NUM_THREADS]; + pthread_barrier_t barrier; + int i, fd, ret = 0; + + pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1); + + action.sa_flags = SA_SIGINFO | SA_NODEFER; + action.sa_sigaction = sigtrap_handler; + sigemptyset(&action.sa_mask); + if (sigaction(SIGTRAP, &action, &oldact)) { + pr_debug("FAILED sigaction()\n"); + ret = -1; + goto out_sigaction; + } + + + fd = sys_perf_event_open(&attr, 0, -1, -1, perf_event_open_cloexec_flag()); + if (fd < 0) { + pr_debug("FAILED sys_perf_event_open()\n"); + ret = -1; + goto out_sigaction; + } + + /* Spawn threads inheriting perf event. 
*/ + for (i = 0; i < NUM_THREADS; i++) { + if (pthread_create(&threads[i], NULL, test_thread, &barrier)) { + pr_debug("FAILED pthread_create()"); + ret = -1; + goto out_perf_event; + } + } + + ret |= run_stress_test(fd, threads, &barrier); + +out_perf_event: + close(fd); +out_sigaction: + sigaction(SIGTRAP, &oldact, NULL); + pthread_barrier_destroy(&barrier); + return ret; +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index b85f005308a3..c3f2e2ecbfd6 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -127,6 +127,7 @@ int test__parse_metric(struct test *test, int subtest); int test__pe_file_parsing(struct test *test, int subtest); int test__expand_cgroup_events(struct test *test, int subtest); int test__perf_time_to_tsc(struct test *test, int subtest); +int test__sigtrap(struct test *test, int subtest); bool test__bp_signal_is_supported(void); bool test__bp_account_is_supported(void);
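A note on the pass/fail arithmetic in the test above, as I read the code: the breakpoint is armed RW on ctx.iterate_on, so with iterate_on == 3000 each thread trips it once per evaluation of the loop condition "i < ctx.iterate_on - 1", i.e. 3000 reads, matching the 3000 additions of its tid to ctx.tids_want_signal (one before the loop, 2999 inside it). Each synchronous SIGTRAP subtracts the receiving thread's tid again, so the final checks signal_count == NUM_THREADS * iterate_on and tids_want_signal == 0 hold exactly when every access produced one signal, delivered to the thread that did the access. Once wired up, the test should then be listed by perf test as "Sigtrap support" and be runnable individually by index or name (e.g. perf test -v sigtrap, assuming the usual substring matching of test descriptions).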