
[v3,1/9] block: Introduce QUEUE_FLAG_SUB_PAGE_SEGMENTS and CONFIG_BLK_SUB_PAGE_SEGMENTS

Message ID 20230118225447.2809787-2-bvanassche@acm.org
State New
Series Add support for segments smaller than one page

Commit Message

Bart Van Assche Jan. 18, 2023, 10:54 p.m. UTC
Prepare for introducing support for segments smaller than the page size
by introducing the request queue flag QUEUE_FLAG_SUB_PAGE_SEGMENTS.
Introduce CONFIG_BLK_SUB_PAGE_SEGMENTS to prevent the performance of
block drivers that support segments >= PAGE_SIZE from being affected.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/Kconfig          | 9 +++++++++
 include/linux/blkdev.h | 7 +++++++
 2 files changed, 16 insertions(+)
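
For context, a minimal sketch (not part of this patch) of how a driver
whose controller only supports small DMA segments might opt in. The
function and driver are hypothetical, and the snippet assumes a later
patch in this series relaxes blk_queue_max_segment_size() to accept
values below PAGE_SIZE when the flag is set:

/* Hypothetical driver setup: opt in to sub-page segments. */
static void example_setup_queue(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_SUB_PAGE_SEGMENTS, q);
	/* Advertise a maximum segment size below PAGE_SIZE, e.g. 512 bytes. */
	blk_queue_max_segment_size(q, 512);
}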

Comments

Bart Van Assche Jan. 21, 2023, 12:11 a.m. UTC | #1
On 1/18/23 15:02, Jens Axboe wrote:
> On 1/18/23 3:54 PM, Bart Van Assche wrote:
>> Prepare for introducing support for segments smaller than the page size
>> by introducing the request queue flag QUEUE_FLAG_SUB_PAGE_SEGMENTS.
>> Introduce CONFIG_BLK_SUB_PAGE_SEGMENTS to prevent the performance of
>> block drivers that support segments >= PAGE_SIZE from being affected.
>>
>> Cc: Christoph Hellwig <hch@lst.de>
>> Cc: Ming Lei <ming.lei@redhat.com>
>> Cc: Keith Busch <kbusch@kernel.org>
>> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
>> ---
>>   block/Kconfig          | 9 +++++++++
>>   include/linux/blkdev.h | 7 +++++++
>>   2 files changed, 16 insertions(+)
>>
>> diff --git a/block/Kconfig b/block/Kconfig
>> index 5d9d9c84d516..e85061d2175b 100644
>> --- a/block/Kconfig
>> +++ b/block/Kconfig
>> @@ -35,6 +35,15 @@ config BLOCK_LEGACY_AUTOLOAD
>>   	  created on demand, but scripts that manually create device nodes and
>>   	  then call losetup might rely on this behavior.
>>   
>> +config BLK_SUB_PAGE_SEGMENTS
>> +       bool "Support segments smaller than the page size"
>> +       default n
>> +       help
>> +	  Most storage controllers support DMA segments larger than the typical
>> +	  size of a virtual memory page. Some embedded controllers only support
>> +	  DMA segments smaller than the page size. Enable this option to support
>> +	  such controllers.
> 
> This should not be a visible option at all; affected drivers should just
> select it.

Hi Jens,

If CONFIG_BLK_SUB_PAGE_SEGMENTS is made invisible, how should this 
option be enabled for the scsi_debug and null_blk drivers? Adding 
"select BLK_SUB_PAGE_SEGMENTS" to the Kconfig section of these drivers 
would have the unfortunate side effect that enabling either driver would 
make all block drivers slower. How about making sub-page segment support 
configurable for the scsi_debug and null_blk drivers only? That would 
allow kernel developers who want to test the sub-page segment support to 
enable this functionality without making e.g. distro kernels slower.

Thanks,

Bart.
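
A sketch of the alternative Bart suggests, as a testing-only, per-driver
opt-in. The option name and help text are illustrative and not from the
series; BLK_DEV_NULL_BLK is the existing null_blk Kconfig symbol:

config BLK_DEV_NULL_BLK_SUB_PAGE_SEGMENTS
	bool "null_blk: allow segments smaller than the page size"
	depends on BLK_DEV_NULL_BLK
	select BLK_SUB_PAGE_SEGMENTS
	help
	  Intended for testing the block layer's sub-page segment code with
	  null_blk. Selecting this option enables BLK_SUB_PAGE_SEGMENTS
	  globally, so leave it disabled on production kernels.

An analogous option for scsi_debug under drivers/scsi would follow the
same pattern, so only kernels built for testing pay the cost.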

Patch

diff --git a/block/Kconfig b/block/Kconfig
index 5d9d9c84d516..e85061d2175b 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -35,6 +35,15 @@ config BLOCK_LEGACY_AUTOLOAD
 	  created on demand, but scripts that manually create device nodes and
 	  then call losetup might rely on this behavior.
 
+config BLK_SUB_PAGE_SEGMENTS
+       bool "Support segments smaller than the page size"
+       default n
+       help
+	  Most storage controllers support DMA segments larger than the typical
+	  size of a virtual memory page. Some embedded controllers only support
+	  DMA segments smaller than the page size. Enable this option to support
+	  such controllers.
+
 config BLK_RQ_ALLOC_TIME
 	bool
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 89f51d68c68a..6cbb22fb93ee 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -547,6 +547,7 @@ struct request_queue {
 /* Keep blk_queue_flag_name[] in sync with the definitions below */
 #define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
 #define QUEUE_FLAG_DYING	1	/* queue being torn down */
+#define QUEUE_FLAG_SUB_PAGE_SEGMENTS 2	/* segments smaller than one page */
 #define QUEUE_FLAG_NOMERGES     3	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
@@ -613,6 +614,12 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
 #define blk_queue_skip_tagset_quiesce(q) \
 	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
+#ifdef CONFIG_BLK_SUB_PAGE_SEGMENTS
+#define blk_queue_sub_page_segments(q)				\
+	test_bit(QUEUE_FLAG_SUB_PAGE_SEGMENTS, &(q)->queue_flags)
+#else
+#define blk_queue_sub_page_segments(q) false
+#endif
 
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
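
To illustrate the effect of the #else stub (the helper below is
hypothetical, not from the series): with CONFIG_BLK_SUB_PAGE_SEGMENTS
disabled, blk_queue_sub_page_segments(q) is the constant false, so the
compiler can discard the sub-page branch entirely and drivers with
page-sized-or-larger segments pay no runtime cost.

/* Hypothetical block-layer helper built on the new predicate. */
static unsigned int example_max_segment_size(const struct request_queue *q)
{
	unsigned int max_size = queue_max_segment_size(q);

	if (blk_queue_sub_page_segments(q))
		return max_size;	/* may legitimately be < PAGE_SIZE */
	/* Otherwise at least page-sized segments may be assumed. */
	return max_t(unsigned int, max_size, PAGE_SIZE);
}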