@@ -320,6 +320,9 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
status = SAM_STAT_GOOD;
target_complete_cmd(cmd, status);
+
+ if (ibr->pg)
+ __free_page(ibr->pg);
kfree(ibr);
}
@@ -444,13 +447,17 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
}
static sense_reason_t
-iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
+iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd,
+ bool ndob)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *sg = &cmd->t_data_sg[0];
unsigned char *buf, *not_zero;
int ret;
+ if (ndob)
+ goto issue_zero;
+
buf = kmap(sg_page(sg)) + sg->offset;
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -464,6 +471,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
if (not_zero)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+issue_zero:
ret = blkdev_issue_zeroout(bdev,
target_to_linux_sector(dev, cmd->t_task_lba),
target_to_linux_sector(dev,
@@ -481,13 +489,15 @@ iblock_execute_write_same(struct se_cmd *cmd)
{
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
struct iblock_req *ibr;
- struct scatterlist *sg;
+ struct scatterlist *sg, ndob_sg;
+ struct page *pg = NULL;
struct bio *bio;
struct bio_list list;
struct se_device *dev = cmd->se_dev;
sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
sector_t sectors = target_to_linux_sector(dev,
sbc_get_write_same_sectors(cmd));
+ bool ndob = cmd->t_task_cdb[1] & 0x01;
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -497,7 +507,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
sg = &cmd->t_data_sg[0];
if (cmd->t_data_nents > 1 ||
- sg->length != cmd->se_dev->dev_attrib.block_size) {
+ (sg && sg->length != cmd->se_dev->dev_attrib.block_size)) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
" block_size: %u\n", cmd->t_data_nents, sg->length,
cmd->se_dev->dev_attrib.block_size);
@@ -505,14 +515,26 @@ iblock_execute_write_same(struct se_cmd *cmd)
}
if (bdev_write_zeroes_sectors(bdev)) {
- if (!iblock_execute_zero_out(bdev, cmd))
+ if (!iblock_execute_zero_out(bdev, cmd, ndob))
return 0;
}
+ if (ndob) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ sg_init_table(&ndob_sg, 1);
+ sg_set_page(&ndob_sg, pg, cmd->se_dev->dev_attrib.block_size,
+ 0);
+ sg = &ndob_sg;
+ }
+
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
- goto fail;
+ goto fail_free_pg;
cmd->priv = ibr;
+ ibr->pg = pg;
bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
if (!bio)
@@ -548,7 +570,9 @@ iblock_execute_write_same(struct se_cmd *cmd)
bio_put(bio);
fail_free_ibr:
kfree(ibr);
-fail:
+fail_free_pg:
+ if (pg)
+ __free_page(pg);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -14,6 +14,7 @@
struct iblock_req {
refcount_t pending;
atomic_t ib_bio_err_cnt;
+ struct page *pg;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
If the WRITE_SAME NDOB (No Data-Out Buffer) bit is set we don't have a data buffer, and we will crash when we access t_data_sg. Fix this by allocating a zeroed page to use for the data buffer that gets passed to the block layer. For the iblock_execute_zero_out case, just add a check for whether NDOB was set. Signed-off-by: Mike Christie <michael.christie@oracle.com> --- drivers/target/target_core_iblock.c | 36 ++++++++++++++++++++++++----- drivers/target/target_core_iblock.h | 1 + 2 files changed, 31 insertions(+), 6 deletions(-)