--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -57,14 +57,6 @@ static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent);
-static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
-{
- size_t index;
-
- for (index = 0; index < num_pages; index++)
- spin_lock_init(&zram->table[index].lock);
-}
-
static int zram_slot_trylock(struct zram *zram, u32 index)
{
return spin_trylock(&zram->table[index].lock);
@@ -1219,7 +1211,7 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
- size_t num_pages;
+ size_t num_pages, index;
num_pages = disksize >> PAGE_SHIFT;
zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
@@ -1234,7 +1226,9 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
- zram_meta_init_table_locks(zram, num_pages);
+
+ for (index = 0; index < num_pages; index++)
+ spin_lock_init(&zram->table[index].lock);
return true;
}
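
With the init loop moved inline, every slot lock is initialized as soon
as zram_meta_alloc() succeeds. For orientation, a sketch of the companion
wrappers these spinlocks are presumably taken through elsewhere in
zram_drv.c, assuming they mirror zram_slot_trylock() above (names
inferred, not quoted from this patch):

	static void zram_slot_lock(struct zram *zram, u32 index)
	{
		spin_lock(&zram->table[index].lock);
	}

	static void zram_slot_unlock(struct zram *zram, u32 index)
	{
		spin_unlock(&zram->table[index].lock);
	}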
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1259,6 +1259,9 @@ void nbcon_kthreads_wake(void)
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
+ if (!(console_srcu_read_flags(con) & CON_NBCON))
+ continue;
+
/*
* Only schedule irq_work if the printing thread is
* actively waiting. If not waiting, the thread will
@@ -1292,8 +1295,14 @@ void nbcon_kthread_stop(struct console *con)
* Return: True if the kthread was started or already exists.
* Otherwise false and @con must not be registered.
*
- * If @con was already registered, it must be unregistered before
- * the global state variable @printk_kthreads_running can be set.
+ * This function is called when nbcon consoles are expected to be
+ * flushed using the kthread. Messages printed with NBCON_PRIO_NORMAL
+ * will no longer be flushed by the legacy loop. This is why failure
+ * must be fatal for console registration.
+ *
+ * If @con was already registered and this function fails, @con must be
+ * unregistered before the global state variable @printk_kthreads_running
+ * can be set.
*/
bool nbcon_kthread_create(struct console *con)
{
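
Per the comment above, once printk_kthreads_running is set,
NBCON_PRIO_NORMAL messages are flushed only by the per-console kthreads,
so a console without its kthread would silently lose output. A minimal
sketch of a caller honoring that contract (hypothetical helper, not the
actual register_console() path):

	static bool nbcon_enable_threaded_flush(struct console *con)
	{
		if (!nbcon_kthread_create(con)) {
			/* No kthread: @con must not (remain) registered. */
			return false;
		}
		/* Only now may printk_kthreads_running be set. */
		return true;
	}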
@@ -1347,8 +1356,7 @@ static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
if (!printk_percpu_data_ready())
return &early_nbcon_pcpu_emergency_nesting;
- /* Open code this_cpu_ptr() without checking migration. */
- return per_cpu_ptr(&nbcon_pcpu_emergency_nesting, raw_smp_processor_id());
+ return raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
}
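
raw_cpu_ptr() is exactly the open-coded form being deleted: it resolves
the per-CPU address via raw_smp_processor_id() and skips the
CONFIG_DEBUG_PREEMPT context check that this_cpu_ptr() performs. The
equivalence, spelled out:

	/* Both yield the same pointer on a given CPU; neither form
	 * triggers the "in preemptible context" debug check. */
	unsigned int *a = per_cpu_ptr(&nbcon_pcpu_emergency_nesting,
				      raw_smp_processor_id());
	unsigned int *b = raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
	WARN_ON(a != b);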
/**
@@ -1696,6 +1704,7 @@ bool nbcon_alloc(struct console *con)
if (printk_kthreads_running) {
if (!nbcon_kthread_create(con)) {
kfree(con->pbufs);
+ con->pbufs = NULL;
return false;
}
}
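
Clearing con->pbufs right after the kfree() means any later teardown
that frees the buffer again will see NULL, and kfree(NULL) is a defined
no-op. The general pattern, shown on a hypothetical structure:

	static void thing_free_buf(struct thing *t)
	{
		kfree(t->buf);	/* kfree(NULL) is a no-op ... */
		t->buf = NULL;	/* ... so reaching this path twice is harmless */
	}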
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2451,7 +2451,6 @@ static u64 syslog_seq;
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
-static inline void legacy_kthread_wake(void) { }
#endif /* CONFIG_PRINTK */
@@ -3162,10 +3161,11 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
bool progress;
/*
- * console_flush_all() is only for legacy consoles when
- * the nbcon consoles have their printer threads.
+ * console_flush_all() is only responsible for nbcon
+ * consoles when the nbcon consoles cannot print via
+ * their atomic or threaded flushing.
*/
- if ((flags & CON_NBCON) && ft.nbcon_offload)
+ if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
continue;
if (!console_is_usable(con, flags, !do_cond_resched))
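
The widened condition reads: skip an nbcon console here whenever either
nbcon path can print it, atomically or via its kthread; the legacy loop
takes over only when both are unavailable. Expressed as a hypothetical
predicate (struct console_flush_type and its fields are from
kernel/printk/internal.h):

	static bool nbcon_prints_elsewhere(short flags,
					   const struct console_flush_type *ft)
	{
		return (flags & CON_NBCON) &&
		       (ft->nbcon_atomic || ft->nbcon_offload);
	}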
@@ -3507,7 +3507,7 @@ void console_start(struct console *console)
printk_get_console_flush_type(&ft);
if (is_nbcon && ft.nbcon_offload)
nbcon_kthread_wake(console);
- else if (!is_nbcon && ft.legacy_offload)
+ else if (ft.legacy_offload)
defer_console_output();
__pr_flush(console, 1000, true);
@@ -3540,10 +3540,11 @@ static bool legacy_kthread_should_wakeup(void)
u64 printk_seq;
/*
- * The legacy printer thread is only for legacy consoles when
- * the nbcon consoles have their printer threads.
+ * The legacy printer thread is only responsible for nbcon
+ * consoles when the nbcon consoles cannot print via their
+ * atomic or threaded flushing.
*/
- if ((flags & CON_NBCON) && ft.nbcon_offload)
+ if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
continue;
if (!console_is_usable(con, flags, false))
@@ -4343,6 +4344,8 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
/* Flush the consoles so that records up to @seq are printed. */
printk_get_console_flush_type(&ft);
+ if (ft.nbcon_atomic)
+ nbcon_atomic_flush_pending();
if (ft.legacy_direct) {
console_lock();
console_unlock();
@@ -4446,19 +4449,14 @@ static bool pr_flush(int timeout_ms, bool reset_on_progress)
static DEFINE_PER_CPU(int, printk_pending);
-static void legacy_kthread_wake(void)
-{
- if (printk_legacy_kthread)
- wake_up_interruptible(&legacy_wait);
-}
-
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_OUTPUT) {
if (force_legacy_kthread()) {
- legacy_kthread_wake();
+ if (printk_legacy_kthread)
+ wake_up_interruptible(&legacy_wait);
} else {
if (console_trylock())
console_unlock();
@@ -4873,6 +4871,8 @@ void console_try_replay_all(void)
printk_get_console_flush_type(&ft);
if (console_trylock()) {
__console_rewind_all();
+ if (ft.nbcon_atomic)
+ nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_offload)
--- a/kernel/printk/printk_ringbuffer.h
+++ b/kernel/printk/printk_ringbuffer.h
@@ -4,6 +4,7 @@
#define _KERNEL_PRINTK_RINGBUFFER_H
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/dev_printk.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -122,7 +123,7 @@ enum desc_state {
#define _DATA_SIZE(sz_bits) (1UL << (sz_bits))
#define _DESCS_COUNT(ct_bits) (1U << (ct_bits))
-#define DESC_SV_BITS (sizeof(unsigned long) * 8)
+#define DESC_SV_BITS BITS_PER_LONG
#define DESC_FLAGS_SHIFT (DESC_SV_BITS - 2)
#define DESC_FLAGS_MASK (3UL << DESC_FLAGS_SHIFT)
#define DESC_STATE(sv) (3UL & (sv >> DESC_FLAGS_SHIFT))
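
BITS_PER_LONG comes in via the new <linux/bits.h> include and is by
definition sizeof(unsigned long) * 8, so the substitution is purely
cosmetic; the derived constants are unchanged:

	/* 64-bit: DESC_SV_BITS == 64, DESC_FLAGS_SHIFT == 62
	 * 32-bit: DESC_SV_BITS == 32, DESC_FLAGS_SHIFT == 30 */
	BUILD_BUG_ON(DESC_SV_BITS != sizeof(unsigned long) * 8);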
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt6
+-rt7