@@ -797,13 +797,15 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
intel_psr_wait_for_idle_locked(crtc_state);
- local_irq_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_disable();
intel_vblank_evade(&evade);
drm_crtc_vblank_put(&crtc->base);
} else {
- local_irq_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_disable();
}
if (new_plane_state->uapi.visible) {
@@ -813,7 +815,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
intel_plane_disable_arm(plane, crtc_state);
}
- local_irq_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_enable();
intel_psr_unlock(crtc_state);
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -739,7 +739,6 @@ struct bpf_nh_params {
#define BPF_RI_F_CPU_MAP_INIT BIT(2)
#define BPF_RI_F_DEV_MAP_INIT BIT(3)
#define BPF_RI_F_XSK_MAP_INIT BIT(4)
-#define BPF_RI_F_SEG6_STATE BIT(5)
struct bpf_redirect_info {
u64 tgt_index;
@@ -857,29 +856,6 @@ static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_ma
*lh_xsk = lh;
}
-static inline bool bpf_net_ctx_seg6_state_avail(void)
-{
- struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
-
- if (!bpf_net_ctx)
- return false;
- return bpf_net_ctx->ri.kern_flags & BPF_RI_F_SEG6_STATE;
-}
-
-static inline void bpf_net_ctx_seg6_state_set(void)
-{
- struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
-
- bpf_net_ctx->ri.kern_flags |= BPF_RI_F_SEG6_STATE;
-}
-
-static inline void bpf_net_ctx_seg6_state_clr(void)
-{
- struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
-
- bpf_net_ctx->ri.kern_flags &= ~BPF_RI_F_SEG6_STATE;
-}
-
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
* lwt, ...). Subsystems allowing direct data access must (!)
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt13
+-rt14
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6459,8 +6459,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
void *srh_tlvs, *srh_end, *ptr;
int srhoff = 0;
- if (!bpf_net_ctx_seg6_state_avail())
- return -EINVAL;
lockdep_assert_held(&srh_state->bh_lock);
if (srh == NULL)
return -EINVAL;
@@ -6518,8 +6516,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
int hdroff = 0;
int err;
- if (!bpf_net_ctx_seg6_state_avail())
- return -EINVAL;
lockdep_assert_held(&srh_state->bh_lock);
switch (action) {
case SEG6_LOCAL_ACTION_END_X:
@@ -6597,8 +6593,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
int srhoff = 0;
int ret;
- if (!bpf_net_ctx_seg6_state_avail())
- return -EINVAL;
lockdep_assert_held(&srh_state->bh_lock);
if (unlikely(srh == NULL))
return -EINVAL;
@@ -11053,7 +11047,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
};
const struct bpf_prog_ops lwt_seg6local_prog_ops = {
- .test_run = bpf_prog_test_run_skb,
};
const struct bpf_verifier_ops cg_sock_verifier_ops = {
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -1429,7 +1429,6 @@ static int input_action_end_bpf(struct sk_buff *skb,
* bpf_prog_run_save_cb().
*/
local_lock_nested_bh(&seg6_bpf_srh_states.bh_lock);
- bpf_net_ctx_seg6_state_set();
srh_state = this_cpu_ptr(&seg6_bpf_srh_states);
srh_state->srh = srh;
srh_state->hdrlen = srh->hdrlen << 3;
@@ -1453,7 +1452,6 @@ static int input_action_end_bpf(struct sk_buff *skb,
if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
goto drop;
- bpf_net_ctx_seg6_state_clr();
local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
if (ret != BPF_REDIRECT)
@@ -1462,7 +1460,6 @@ static int input_action_end_bpf(struct sk_buff *skb,
return dst_input(skb);
drop:
- bpf_net_ctx_seg6_state_clr();
local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
kfree_skb(skb);
return -EINVAL;