--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -663,6 +663,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);
+
#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
@@ -693,8 +694,8 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
}

/*
- * Do not use in new code. There is no real reason to invoke this from
- * atomic contexts.
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
*/
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
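
Note: tasklet_disable_in_atomic() is kept only for legacy callers that cannot sleep. New code should disable a tasklet from preemptible context, where tasklet_disable() is allowed to block until a concurrently running handler returns. A minimal sketch of the preferred pattern, with made-up names (my_tasklet, my_driver_teardown are hypothetical, not from this diff):

  #include <linux/interrupt.h>

  static struct tasklet_struct my_tasklet;      /* hypothetical tasklet */

  /* Hypothetical teardown path, called from process context. */
  static void my_driver_teardown(void)
  {
          /* May block until a running instance of the handler completes. */
          tasklet_disable(&my_tasklet);
          /* ... update state shared with the tasklet handler ... */
          tasklet_enable(&my_tasklet);
  }
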
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -830,8 +830,8 @@ EXPORT_SYMBOL(tasklet_init);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
- * Do not use in new code. There is no real reason to invoke this from
- * atomic contexts.
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
*/
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state))
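
Note: tasklet_unlock_spin_wait() busy-waits for TASKLET_STATE_RUN to clear, which is why it is confined to the discouraged atomic-context disable path. For illustration, in mainline the atomic disable variant composes the nosync disable with this spin wait, roughly:

  static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
  {
          tasklet_disable_nosync(t);      /* raise t->count, do not wait */
          tasklet_unlock_spin_wait(t);    /* busy-wait until RUN clears  */
          smp_mb();
  }
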
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt33
+-rt34
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4697,6 +4697,9 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
struct location *l;
int order;

+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC)
+ return 0;
+
order = get_order(sizeof(struct location) * max);

l = (void *)__get_free_pages(flags, order);
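
Note: on PREEMPT_RT even GFP_ATOMIC allocations must not be issued from truly atomic context, because the page allocator's locks become sleeping locks. The location-tracking buffer is grown with exactly GFP_ATOMIC while interrupts are disabled, so the guard above refuses the resize and the caller sees an ordinary allocation failure (alloc_loc_track() returns 1 on success, 0 on failure). A sketch of how the resize path reacts, modeled on mainline add_location() (shown for illustration only):

  /* Buffer full and cannot be grown on PREEMPT_RT: drop the sample,
   * exactly as on an out-of-memory condition. */
  if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
          return 0;
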