@@ -46,6 +46,35 @@ static struct ufshpb_driver ufshpb_drv;
static int ufshpb_create_sysfs(struct ufs_hba *hba, struct ufshpb_lu *hpb);
+static inline int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
+				     struct ufshpb_subregion *srgn)
+{
+	return rgn->rgn_state != HPB_RGN_INACTIVE &&	/* region holds a map ctx */
+		srgn->srgn_state == HPB_SRGN_CLEAN;	/* subregion map is up to date */
+}
+
+static inline bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
+{	/* only READ(10)/READ(16) are candidates for HPB READ conversion */
+	if (cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_16)
+		return true;
+
+	return false;
+}
+
+static inline bool ufshpb_is_write_discard_cmd(struct scsi_cmnd *cmd)
+{	/* commands that make cached L2P entries stale (see ufshpb_set_ppn_dirty) */
+	if (cmd->cmnd[0] == WRITE_10 || cmd->cmnd[0] == WRITE_16 ||
+	    cmd->cmnd[0] == UNMAP)
+		return true;
+
+	return false;
+}
+
+static inline bool ufshpb_is_support_chunk(int transfer_len)
+{	/* max transfer length (in blocks) that can be converted to HPB READ */
+	return transfer_len <= HPB_MULTI_CHUNK_HIGH;
+}
+
static inline bool ufshpb_is_general_lun(int lun)
{
return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
@@ -137,6 +166,225 @@ static inline void ufshpb_lu_put(struct ufshpb_lu *hpb)
put_device(&hpb->hpb_lu_dev);
}
+static inline u32 ufshpb_get_lpn(struct scsi_cmnd *cmnd)
+{	/* start logical block number; blk layer reports 512-byte sector units */
+	return blk_rq_pos(cmnd->request) >>
+		(ilog2(cmnd->device->sector_size) - 9);
+}
+
+static inline unsigned int ufshpb_get_len(struct scsi_cmnd *cmnd)
+{	/* transfer length converted from 512-byte sectors to logical blocks */
+	return blk_rq_sectors(cmnd->request) >>
+		(ilog2(cmnd->device->sector_size) - 9);
+}
+
+static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
+				   int srgn_idx, int srgn_offset, int cnt)
+{	/* mark @cnt HPB entries dirty from @srgn_offset, crossing (sub)region ends */
+	struct ufshpb_region *rgn;
+	struct ufshpb_subregion *srgn;
+	int set_bit_len;
+	int bitmap_len = hpb->entries_per_srgn;
+
+next_srgn:
+	rgn = hpb->rgn_tbl + rgn_idx;
+	srgn = rgn->srgn_tbl + srgn_idx;
+
+	if ((srgn_offset + cnt) > bitmap_len)	/* clamp to this subregion */
+		set_bit_len = bitmap_len - srgn_offset;
+	else
+		set_bit_len = cnt;
+
+	if (rgn->rgn_state != HPB_RGN_INACTIVE)	/* inactive regions keep no mctx */
+		bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len);
+
+	srgn_offset = 0;
+	if (++srgn_idx == hpb->srgns_per_rgn) {	/* step into the next region */
+		srgn_idx = 0;
+		rgn_idx++;
+	}
+
+	cnt -= set_bit_len;
+	if (cnt > 0)
+		goto next_srgn;
+
+	WARN_ON(cnt < 0);
+}
+
+static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
+				  int srgn_idx, int srgn_offset, int cnt)
+{	/* true if any of @cnt entries from @srgn_offset is dirty or not cached */
+	struct ufshpb_region *rgn;
+	struct ufshpb_subregion *srgn;
+	int bitmap_len = hpb->entries_per_srgn;
+	int i, bit_len;
+
+next_srgn:
+	rgn = hpb->rgn_tbl + rgn_idx;
+	srgn = rgn->srgn_tbl + srgn_idx;
+
+	if (!ufshpb_is_valid_srgn(rgn, srgn))	/* treat an uncached map as dirty */
+		return true;
+
+	/*
+	 * If the region state is active, mctx must be allocated.
+	 * In this case, check whether the region has been evicted or
+	 * the mctx allocation failed.
+	 */
+	WARN_ON(!srgn->mctx);
+
+	if ((srgn_offset + cnt) > bitmap_len)	/* clamp to this subregion */
+		bit_len = bitmap_len - srgn_offset;
+	else
+		bit_len = cnt;
+
+	for (i = 0; i < bit_len; i++) {
+		if (test_bit(srgn_offset + i, srgn->mctx->ppn_dirty))
+			return true;
+	}
+
+	srgn_offset = 0;
+	if (++srgn_idx == hpb->srgns_per_rgn) {	/* step into the next region */
+		srgn_idx = 0;
+		rgn_idx++;
+	}
+
+	cnt -= bit_len;
+	if (cnt > 0)
+		goto next_srgn;
+
+	return false;
+}
+
+static u64 ufshpb_get_ppn(struct ufshpb_lu *hpb,
+			  struct ufshpb_map_ctx *mctx, int pos, int *error)
+{	/* read the cached physical page number for entry @pos; 0 + *error on failure */
+	u64 *ppn_table;
+	struct page *page;
+	int index, offset;
+
+	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);	/* which map page */
+	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);	/* entry within the page */
+
+	page = mctx->m_page[index];
+	if (unlikely(!page)) {
+		*error = -ENOMEM;
+		dev_err(&hpb->hpb_lu_dev,
+			"error. cannot find page in mctx\n");
+		return 0;
+	}
+
+	ppn_table = page_address(page);
+	if (unlikely(!ppn_table)) {
+		*error = -ENOMEM;
+		dev_err(&hpb->hpb_lu_dev, "error. cannot get ppn_table\n");
+		return 0;
+	}
+
+	return ppn_table[offset];
+}
+
+static inline void
+ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
+			int *srgn_idx, int *offset)
+{	/* split @lpn into region index, subregion index and in-subregion offset */
+	int rgn_offset;
+
+	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
+	rgn_offset = lpn & hpb->entries_per_rgn_mask;
+	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
+	*offset = rgn_offset & hpb->entries_per_srgn_mask;
+}
+
+static void
+ufshpb_set_hpb_read_to_upiu(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp,
+			    u32 lpn, u64 ppn, unsigned int transfer_len)
+{	/* rewrite the UPIU CDB in place as an HPB READ carrying the cached ppn */
+	unsigned char *cdb = lrbp->ucd_req_ptr->sc.cdb;
+
+	cdb[0] = UFSHPB_READ;
+
+	put_unaligned_be32(lpn, &cdb[2]);
+	put_unaligned_be64(ppn, &cdb[6]);
+	cdb[14] = transfer_len;	/* one byte; caller bounds it via ufshpb_is_support_chunk() */
+}
+
+/* routine : convert READ(10)/READ(16) to HPB_READ */
+static void ufshpb_prep_fn(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	struct ufshpb_lu *hpb;
+	struct ufshpb_region *rgn;
+	struct ufshpb_subregion *srgn;
+	struct scsi_cmnd *cmd = lrbp->cmd;
+	u32 lpn;
+	u64 ppn;
+	unsigned long flags;
+	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
+	int err = 0;
+
+	hpb = ufshpb_get_hpb_data(cmd);
+	err = ufshpb_lu_get(hpb);
+	if (unlikely(err))
+		return;
+
+	WARN_ON(hpb->lun != cmd->device->lun);
+	if (!ufshpb_is_write_discard_cmd(cmd) &&
+	    !ufshpb_is_read_cmd(cmd))
+		goto put_hpb;
+
+	transfer_len = ufshpb_get_len(cmd);
+	if (unlikely(!transfer_len))
+		goto put_hpb;
+
+	lpn = ufshpb_get_lpn(cmd);
+	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+	rgn = hpb->rgn_tbl + rgn_idx;
+	srgn = rgn->srgn_tbl + srgn_idx;
+
+	/* If the command type is WRITE or DISCARD, set the bitmap as dirty */
+	if (ufshpb_is_write_discard_cmd(cmd)) {
+		spin_lock_irqsave(&hpb->hpb_state_lock, flags);
+		ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+				     transfer_len);
+		spin_unlock_irqrestore(&hpb->hpb_state_lock, flags);
+		goto put_hpb;
+	}
+
+	WARN_ON(!ufshpb_is_read_cmd(cmd));
+
+	if (!ufshpb_is_support_chunk(transfer_len))
+		goto put_hpb;
+
+	spin_lock_irqsave(&hpb->hpb_state_lock, flags);
+	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+				  transfer_len)) {
+		atomic_inc(&hpb->stats.miss_cnt);	/* stale or uncached entry */
+		spin_unlock_irqrestore(&hpb->hpb_state_lock, flags);
+		goto put_hpb;
+	}
+
+	ppn = ufshpb_get_ppn(hpb, srgn->mctx, srgn_offset, &err);
+	spin_unlock_irqrestore(&hpb->hpb_state_lock, flags);
+	if (unlikely(err)) {
+		/*
+		 * In this case, the region state is active,
+		 * but the ppn table is not allocated.
+		 * Make sure that the ppn table is allocated on
+		 * active state.
+		 */
+		WARN_ON(true);
+		dev_err(&hpb->hpb_lu_dev,
+			"ufshpb_get_ppn failed. err %d\n", err);
+		goto put_hpb;
+	}
+
+	ufshpb_set_hpb_read_to_upiu(hpb, lrbp, lpn, ppn, transfer_len);
+
+	atomic_inc(&hpb->stats.hit_cnt);
+put_hpb:
+	ufshpb_lu_put(hpb);
+}
+
static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
struct ufshpb_subregion *srgn)
{
@@ -1688,6 +1936,7 @@ static struct ufshpb_driver ufshpb_drv = {
.bus = &ufsf_bus_type,
},
.ufshpb_ops = {
+ .prep_fn = ufshpb_prep_fn,
.reset = ufshpb_reset,
.reset_host = ufshpb_reset_host,
.suspend = ufshpb_suspend,
This patch changes the read I/O to the HPB read I/O. If the logical address of the read I/O belongs to an active sub-region, the HPB driver modifies the read I/O command to HPB read. It modifies the UPIU command of UFS instead of modifying the existing SCSI command. In HPB version 1.0, the maximum read I/O size that can be converted to HPB read is 4KB. The dirty map of the active sub-region prevents an incorrect HPB read that carries a stale physical page number which was updated by a previous write I/O. Signed-off-by: Daejun Park <daejun7.park@samsung.com> --- drivers/scsi/ufs/ufshpb.c | 249 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 249 insertions(+)