[net-next,09/11] bnxt_en: update all firmware calls to use the new APIs

Message ID 1630187910-22252-10-git-send-email-michael.chan@broadcom.com
State New
Series bnxt_en: Implement new driver APIs to send FW messages

Commit Message

Michael Chan Aug. 28, 2021, 9:58 p.m. UTC
From: Edwin Peer <edwin.peer@broadcom.com>

The conversion follows this general pattern for most of the calls:

1. The input message is changed from a stack variable initialized
using bnxt_hwrm_cmd_hdr_init() to a pointer allocated and initialized
using hwrm_req_init().

2. If we don't need to read the firmware response, the hwrm_send_message()
call is replaced with hwrm_req_send().

3. If we need to read the firmware response, the mutex lock is replaced
by hwrm_req_hold() to hold the response.  Once the response has been
read, the mutex unlock is replaced by hwrm_req_drop().

If additional DMA buffers are needed for firmware response data,
hwrm_req_dma_slice() is used instead of calling dma_alloc_coherent().
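
For example, a converted call that reads the firmware response follows
this shape (a distilled sketch of the pattern above; the function name
bnxt_hwrm_example_qcfg is hypothetical, the calls mirror the actual
conversions in the diff below):

	int bnxt_hwrm_example_qcfg(struct bnxt *bp, u16 fid, int *tx_rings)
	{
		struct hwrm_func_qcfg_output *resp;
		struct hwrm_func_qcfg_input *req;
		int rc;

		/* step 1: allocate and initialize the request */
		rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
		if (rc)
			return rc;

		req->fid = cpu_to_le16(fid);

		/* step 3: hold the response (replaces mutex_lock()) */
		resp = hwrm_req_hold(bp, req);
		/* step 2: replaces _hwrm_send_message() */
		rc = hwrm_req_send(bp, req);
		if (!rc)
			*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
		/* step 3: release the response (replaces mutex_unlock()) */
		hwrm_req_drop(bp, req);
		return rc;
	}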

Some minor refactoring is also done while doing these conversions.

Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 1743 +++++++++--------
 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c |  184 +-
 .../net/ethernet/broadcom/bnxt/bnxt_devlink.c |   80 +-
 .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c |  547 +++---
 drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c |  129 +-
 .../net/ethernet/broadcom/bnxt/bnxt_sriov.c   |  452 +++--
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c  |  263 +--
 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c |   30 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c |   61 +-
 9 files changed, 1950 insertions(+), 1539 deletions(-)

Comments

kernel test robot Aug. 29, 2021, 3:49 a.m. UTC | #1
Hi Michael,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Michael-Chan/bnxt_en-Implement-new-driver-APIs-to-send-FW-messages/20210829-060031
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git c77225119daa0ca0a9c6c007945c0a87b3e4a2e8
config: i386-randconfig-a002-20210829 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project 4e1a164d7bd53653f79decc121afe784d2fde0a7)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/0202eb069086d82c68032e9f5db76e0b4cd5a4ca
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Michael-Chan/bnxt_en-Implement-new-driver-APIs-to-send-FW-messages/20210829-060031
        git checkout 0202eb069086d82c68032e9f5db76e0b4cd5a4ca
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=i386 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   clang-14: warning: optimization flag '-falign-jumps=0' is not supported [-Wignored-optimization-argument]
   In file included from drivers/net/ethernet/broadcom/bnxt/bnxt.c:11:
   In file included from include/linux/module.h:14:
   In file included from include/linux/buildid.h:5:
   In file included from include/linux/mm_types.h:9:
   In file included from include/linux/spinlock.h:51:
   In file included from include/linux/preempt.h:78:
   In file included from arch/x86/include/asm/preempt.h:7:
   In file included from include/linux/thread_info.h:60:
   arch/x86/include/asm/thread_info.h:172:13: warning: calling '__builtin_frame_address' with a nonzero argument is unsafe [-Wframe-address]
           oldframe = __builtin_frame_address(1);
                      ^~~~~~~~~~~~~~~~~~~~~~~~~~
   arch/x86/include/asm/thread_info.h:174:11: warning: calling '__builtin_frame_address' with a nonzero argument is unsafe [-Wframe-address]
                   frame = __builtin_frame_address(2);
                           ^~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/broadcom/bnxt/bnxt.c:5996:2: warning: variable 'req' is uninitialized when used here [-Wuninitialized]
           req->fid = cpu_to_le16(fid);
           ^~~
   drivers/net/ethernet/broadcom/bnxt/bnxt.c:5990:34: note: initialize the variable 'req' to silence this warning
           struct hwrm_func_qcfg_input *req;
                                           ^
                                            = NULL
   3 warnings generated.
--
   clang-14: warning: optimization flag '-falign-jumps=0' is not supported [-Wignored-optimization-argument]
   In file included from drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c:11:
   In file included from include/linux/ethtool.h:17:
   In file included from include/linux/compat.h:14:
   In file included from include/linux/sem.h:5:
   In file included from include/uapi/linux/sem.h:5:
   In file included from include/linux/ipc.h:5:
   In file included from include/linux/spinlock.h:51:
   In file included from include/linux/preempt.h:78:
   In file included from arch/x86/include/asm/preempt.h:7:
   In file included from include/linux/thread_info.h:60:
   arch/x86/include/asm/thread_info.h:172:13: warning: calling '__builtin_frame_address' with a nonzero argument is unsafe [-Wframe-address]
           oldframe = __builtin_frame_address(1);
                      ^~~~~~~~~~~~~~~~~~~~~~~~~~
   arch/x86/include/asm/thread_info.h:174:11: warning: calling '__builtin_frame_address' with a nonzero argument is unsafe [-Wframe-address]
                   frame = __builtin_frame_address(2);
                           ^~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c:1163:7: warning: variable 'rc' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
                   if (is_valid_ether_addr(bp->vf.mac_addr))
                       ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c:1178:6: note: uninitialized use occurs here
           if (rc && strict) {
               ^~
   drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c:1163:3: note: remove the 'if' if its condition is always true
                   if (is_valid_ether_addr(bp->vf.mac_addr))
                   ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c:1157:8: note: initialize the variable 'rc' to silence this warning
           int rc;
                 ^
                  = 0
   3 warnings generated.
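
Per the robot's own suggestion, the minimal fix for this second warning
is to initialize the variable (a sketch; the respin may restructure the
code instead):

	int rc = 0;	/* was "int rc;" -- avoids the uninitialized use at line 1178 */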


vim +/req +5996 drivers/net/ethernet/broadcom/bnxt/bnxt.c

  5986	
  5987	int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
  5988	{
  5989		struct hwrm_func_qcfg_output *resp;
  5990		struct hwrm_func_qcfg_input *req;
  5991		int rc;
  5992	
  5993		if (bp->hwrm_spec_code < 0x10601)
  5994			return 0;
  5995	
> 5996		req->fid = cpu_to_le16(fid);
  5997		resp = hwrm_req_hold(bp, req);
  5998		rc = hwrm_req_send(bp, req);
  5999		if (!rc)
  6000			*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
  6001	
  6002		hwrm_req_drop(bp, req);
  6003		return rc;
  6004	}
  6005	
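
This first warning is a real bug: the conversion of
__bnxt_hwrm_get_tx_rings() dropped the hwrm_req_init() call, so req is
used without ever being allocated. Following the pattern used everywhere
else in this patch, the likely fix is to restore the init before
touching req (a sketch; the actual respin may differ):

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(fid);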

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

Patch

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b9aa56cc10d2..f5175f53f39b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4043,8 +4043,8 @@  static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
 				    struct bnxt_stats_mem *stats)
 {
-	struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_qstats_ext_input req = {0};
+	struct hwrm_func_qstats_ext_output *resp;
+	struct hwrm_func_qstats_ext_input *req;
 	__le64 *hw_masks;
 	int rc;
 
@@ -4052,19 +4052,20 @@  static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
 	    !(bp->flags & BNXT_FLAG_CHIP_P5))
 		return -EOPNOTSUPP;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
 	if (rc)
-		goto qstat_exit;
+		return rc;
 
-	hw_masks = &resp->rx_ucast_pkts;
-	bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
+	req->fid = cpu_to_le16(0xffff);
+	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
 
-qstat_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc) {
+		hw_masks = &resp->rx_ucast_pkts;
+		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
+	}
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -4533,34 +4534,35 @@  static void bnxt_enable_int(struct bnxt *bp)
 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
 			    bool async_only)
 {
-	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_drv_rgtr_input req = {0};
 	DECLARE_BITMAP(async_events_bmap, 256);
 	u32 *events = (u32 *)async_events_bmap;
+	struct hwrm_func_drv_rgtr_output *resp;
+	struct hwrm_func_drv_rgtr_input *req;
 	u32 flags;
 	int rc, i;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
+	if (rc)
+		return rc;
 
-	req.enables =
-		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
-			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
-			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
+				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
-	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
-	req.flags = cpu_to_le32(flags);
-	req.ver_maj_8b = DRV_VER_MAJ;
-	req.ver_min_8b = DRV_VER_MIN;
-	req.ver_upd_8b = DRV_VER_UPD;
-	req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
-	req.ver_min = cpu_to_le16(DRV_VER_MIN);
-	req.ver_upd = cpu_to_le16(DRV_VER_UPD);
+	req->flags = cpu_to_le32(flags);
+	req->ver_maj_8b = DRV_VER_MAJ;
+	req->ver_min_8b = DRV_VER_MIN;
+	req->ver_upd_8b = DRV_VER_UPD;
+	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
+	req->ver_min = cpu_to_le16(DRV_VER_MIN);
+	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
 
 	if (BNXT_PF(bp)) {
 		u32 data[8];
@@ -4577,14 +4579,14 @@  int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
 		}
 
 		for (i = 0; i < 8; i++)
-			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
 
-		req.enables |=
+		req->enables |=
 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
 	}
 
 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
-		req.flags |= cpu_to_le32(
+		req->flags |= cpu_to_le32(
 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
 
 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
@@ -4603,57 +4605,63 @@  int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
 		}
 	}
 	for (i = 0; i < 8; i++)
-		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
 
 	if (async_only)
-		req.enables =
+		req->enables =
 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
 		if (resp->flags &
 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
 {
-	struct hwrm_func_drv_unrgtr_input req = {0};
+	struct hwrm_func_drv_unrgtr_input *req;
+	int rc;
 
 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
+	if (rc)
+		return rc;
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
 {
-	u32 rc = 0;
-	struct hwrm_tunnel_dst_port_free_input req = {0};
+	struct hwrm_tunnel_dst_port_free_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
-	req.tunnel_type = tunnel_type;
+	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
+	if (rc)
+		return rc;
+
+	req->tunnel_type = tunnel_type;
 
 	switch (tunnel_type) {
 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
-		req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
+		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
 		break;
 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
-		req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
+		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
 		break;
 	default:
 		break;
 	}
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
 			   rc);
@@ -4663,17 +4671,19 @@  static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
 					   u8 tunnel_type)
 {
-	u32 rc = 0;
-	struct hwrm_tunnel_dst_port_alloc_input req = {0};
-	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_tunnel_dst_port_alloc_output *resp;
+	struct hwrm_tunnel_dst_port_alloc_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
+	if (rc)
+		return rc;
 
-	req.tunnel_type = tunnel_type;
-	req.tunnel_dst_port_val = port;
+	req->tunnel_type = tunnel_type;
+	req->tunnel_dst_port_val = port;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc) {
 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
 			   rc);
@@ -4693,33 +4703,40 @@  static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
 	}
 
 err_out:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
 {
-	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+	struct hwrm_cfa_l2_set_rx_mask_input *req;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
-	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
+	if (rc)
+		return rc;
 
-	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
-	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
-	req.mask = cpu_to_le32(vnic->rx_mask);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+	req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+	req->mask = cpu_to_le32(vnic->rx_mask);
+	return hwrm_req_send_silent(bp, req);
 }
 
 #ifdef CONFIG_RFS_ACCEL
 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 					    struct bnxt_ntuple_filter *fltr)
 {
-	struct hwrm_cfa_ntuple_filter_free_input req = {0};
+	struct hwrm_cfa_ntuple_filter_free_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
-	req.ntuple_filter_id = fltr->filter_id;
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
+	if (rc)
+		return rc;
+
+	req->ntuple_filter_id = fltr->filter_id;
+	return hwrm_req_send(bp, req);
 }
 
 #define BNXT_NTP_FLTR_FLAGS					\
@@ -4744,69 +4761,70 @@  static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 					     struct bnxt_ntuple_filter *fltr)
 {
-	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
+	struct hwrm_cfa_ntuple_filter_alloc_input *req;
 	struct flow_keys *keys = &fltr->fkeys;
 	struct bnxt_vnic_info *vnic;
 	u32 flags = 0;
-	int rc = 0;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
-	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
+	if (rc)
+		return rc;
+
+	req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
 
 	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
-		req.dst_id = cpu_to_le16(fltr->rxq);
+		req->dst_id = cpu_to_le16(fltr->rxq);
 	} else {
 		vnic = &bp->vnic_info[fltr->rxq + 1];
-		req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
+		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
 	}
-	req.flags = cpu_to_le32(flags);
-	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+	req->flags = cpu_to_le32(flags);
+	req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
 
-	req.ethertype = htons(ETH_P_IP);
-	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
-	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
-	req.ip_protocol = keys->basic.ip_proto;
+	req->ethertype = htons(ETH_P_IP);
+	memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+	req->ip_protocol = keys->basic.ip_proto;
 
 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
 		int i;
 
-		req.ethertype = htons(ETH_P_IPV6);
-		req.ip_addr_type =
+		req->ethertype = htons(ETH_P_IPV6);
+		req->ip_addr_type =
 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
-		*(struct in6_addr *)&req.src_ipaddr[0] =
+		*(struct in6_addr *)&req->src_ipaddr[0] =
 			keys->addrs.v6addrs.src;
-		*(struct in6_addr *)&req.dst_ipaddr[0] =
+		*(struct in6_addr *)&req->dst_ipaddr[0] =
 			keys->addrs.v6addrs.dst;
 		for (i = 0; i < 4; i++) {
-			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
-			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+			req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+			req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
 		}
 	} else {
-		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
-		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
-		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
-		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+		req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+		req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
 	}
 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
-		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
-		req.tunnel_type =
+		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+		req->tunnel_type =
 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
 	}
 
-	req.src_port = keys->ports.src;
-	req.src_port_mask = cpu_to_be16(0xffff);
-	req.dst_port = keys->ports.dst;
-	req.dst_port_mask = cpu_to_be16(0xffff);
+	req->src_port = keys->ports.src;
+	req->src_port_mask = cpu_to_be16(0xffff);
+	req->dst_port = keys->ports.dst;
+	req->dst_port_mask = cpu_to_be16(0xffff);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc) {
-		resp = bnxt_get_hwrm_resp_addr(bp, &req);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc)
 		fltr->filter_id = resp->ntuple_filter_id;
-	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 #endif
@@ -4814,62 +4832,62 @@  static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
 				     u8 *mac_addr)
 {
-	u32 rc = 0;
-	struct hwrm_cfa_l2_filter_alloc_input req = {0};
-	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_l2_filter_alloc_output *resp;
+	struct hwrm_cfa_l2_filter_alloc_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
-	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
-		req.flags |=
+		req->flags |=
 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
-	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
-	req.enables =
+	req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+	req->enables =
 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
-	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
-	req.l2_addr_mask[0] = 0xff;
-	req.l2_addr_mask[1] = 0xff;
-	req.l2_addr_mask[2] = 0xff;
-	req.l2_addr_mask[3] = 0xff;
-	req.l2_addr_mask[4] = 0xff;
-	req.l2_addr_mask[5] = 0xff;
-
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	memcpy(req->l2_addr, mac_addr, ETH_ALEN);
+	req->l2_addr_mask[0] = 0xff;
+	req->l2_addr_mask[1] = 0xff;
+	req->l2_addr_mask[2] = 0xff;
+	req->l2_addr_mask[3] = 0xff;
+	req->l2_addr_mask[4] = 0xff;
+	req->l2_addr_mask[5] = 0xff;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
 							resp->l2_filter_id;
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
 {
+	struct hwrm_cfa_l2_filter_free_input *req;
 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
-	int rc = 0;
+	int rc;
 
 	/* Any associated ntuple filters will also be cleared by firmware. */
-	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+	if (rc)
+		return rc;
+	hwrm_req_hold(bp, req);
 	for (i = 0; i < num_of_vnics; i++) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
 		for (j = 0; j < vnic->uc_filter_count; j++) {
-			struct hwrm_cfa_l2_filter_free_input req = {0};
-
-			bnxt_hwrm_cmd_hdr_init(bp, &req,
-					       HWRM_CFA_L2_FILTER_FREE, -1, -1);
-
-			req.l2_filter_id = vnic->fw_l2_filter_id[j];
+			req->l2_filter_id = vnic->fw_l2_filter_id[j];
 
-			rc = _hwrm_send_message(bp, &req, sizeof(req),
-						HWRM_CMD_TIMEOUT);
+			rc = hwrm_req_send(bp, req);
 		}
 		vnic->uc_filter_count = 0;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
-
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -4877,12 +4895,15 @@  static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
-	struct hwrm_vnic_tpa_cfg_input req = {0};
+	struct hwrm_vnic_tpa_cfg_input *req;
+	int rc;
 
 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
+	if (rc)
+		return rc;
 
 	if (tpa_flags) {
 		u16 mss = bp->dev->mtu - 40;
@@ -4896,9 +4917,9 @@  static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 		if (tpa_flags & BNXT_FLAG_GRO)
 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
 
-		req.flags = cpu_to_le32(flags);
+		req->flags = cpu_to_le32(flags);
 
-		req.enables =
+		req->enables =
 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
@@ -4922,14 +4943,14 @@  static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 		} else {
 			segs = ilog2(nsegs);
 		}
-		req.max_agg_segs = cpu_to_le16(segs);
-		req.max_aggs = cpu_to_le16(max_aggs);
+		req->max_agg_segs = cpu_to_le16(segs);
+		req->max_aggs = cpu_to_le16(max_aggs);
 
-		req.min_agg_len = cpu_to_le32(512);
+		req->min_agg_len = cpu_to_le32(512);
 	}
-	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
@@ -5073,86 +5094,102 @@  static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-	struct hwrm_vnic_rss_cfg_input req = {0};
+	struct hwrm_vnic_rss_cfg_input *req;
+	int rc;
 
 	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
+	if (rc)
+		return rc;
+
 	if (set_rss) {
 		bnxt_fill_hw_rss_tbl(bp, vnic);
-		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
-		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
-		req.hash_key_tbl_addr =
+		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
+		req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+		req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+		req->hash_key_tbl_addr =
 			cpu_to_le64(vnic->rss_hash_key_dma_addr);
 	}
-	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-	struct hwrm_vnic_rss_cfg_input req = {0};
+	struct hwrm_vnic_rss_cfg_input *req;
 	dma_addr_t ring_tbl_map;
 	u32 i, nr_ctxs;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
+	if (rc)
+		return rc;
+
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	if (!set_rss)
+		return hwrm_req_send(bp, req);
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
-	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
-	if (!set_rss) {
-		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-		return 0;
-	}
 	bnxt_fill_hw_rss_tbl(bp, vnic);
-	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
-	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+	req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
+	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
 	ring_tbl_map = vnic->rss_table_dma_addr;
 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
-	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
-		int rc;
 
-		req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
-		req.ring_table_pair_index = i;
-		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
-		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	hwrm_req_hold(bp, req);
+	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
+		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+		req->ring_table_pair_index = i;
+		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
-			return rc;
+			goto exit;
 	}
-	return 0;
+
+exit:
+	hwrm_req_drop(bp, req);
+	return rc;
 }
 
 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-	struct hwrm_vnic_plcmodes_cfg_input req = {0};
+	struct hwrm_vnic_plcmodes_cfg_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
-	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
-				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
-				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
-	req.enables =
+	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+	req->enables =
 		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
 			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
 	/* thresholds not implemented in firmware yet */
-	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
-	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
-	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+	req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	return hwrm_req_send(bp, req);
 }
 
 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
 					u16 ctx_idx)
 {
-	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
-	req.rss_cos_lb_ctx_id =
+	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+		return;
+
+	req->rss_cos_lb_ctx_id =
 		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
 
-	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	hwrm_req_send(bp, req);
 	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
 }
 
@@ -5173,20 +5210,20 @@  static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
 
 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
 {
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
 	int rc;
-	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
-	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
-						bp->hwrm_cmd_resp_addr;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
-			       -1);
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 
 	return rc;
 }
@@ -5200,47 +5237,50 @@  static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
 
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 {
-	unsigned int ring = 0, grp_idx;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-	struct hwrm_vnic_cfg_input req = {0};
+	struct hwrm_vnic_cfg_input *req;
+	unsigned int ring = 0, grp_idx;
 	u16 def_vlan = 0;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
+	if (rc)
+		return rc;
 
 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
 
-		req.default_rx_ring_id =
+		req->default_rx_ring_id =
 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
-		req.default_cmpl_ring_id =
+		req->default_cmpl_ring_id =
 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
-		req.enables =
+		req->enables =
 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
 		goto vnic_mru;
 	}
-	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
+	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
 	/* Only RSS support for now TBD: COS & LB */
 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
-		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
-		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
 					   VNIC_CFG_REQ_ENABLES_MRU);
 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
-		req.rss_rule =
+		req->rss_rule =
 			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
-		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
 					   VNIC_CFG_REQ_ENABLES_MRU);
-		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
+		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
 	} else {
-		req.rss_rule = cpu_to_le16(0xffff);
+		req->rss_rule = cpu_to_le16(0xffff);
 	}
 
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
-		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
-		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
+		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
+		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
 	} else {
-		req.cos_rule = cpu_to_le16(0xffff);
+		req->cos_rule = cpu_to_le16(0xffff);
 	}
 
 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -5251,34 +5291,36 @@  int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 		ring = bp->rx_nr_rings - 1;
 
 	grp_idx = bp->rx_ring[ring].bnapi->index;
-	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
-	req.lb_rule = cpu_to_le16(0xffff);
+	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+	req->lb_rule = cpu_to_le16(0xffff);
 vnic_mru:
-	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
+	req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
 
-	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
 #ifdef CONFIG_BNXT_SRIOV
 	if (BNXT_VF(bp))
 		def_vlan = bp->vf.vlan;
 #endif
 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
-		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
 	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
+		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
 {
 	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
-		struct hwrm_vnic_free_input req = {0};
+		struct hwrm_vnic_free_input *req;
 
-		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
-		req.vnic_id =
+		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
+			return;
+
+		req->vnic_id =
 			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
 
-		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		hwrm_req_send(bp, req);
 		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
 	}
 }
@@ -5295,11 +5337,15 @@  static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
 				unsigned int start_rx_ring_idx,
 				unsigned int nr_rings)
 {
-	int rc = 0;
 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
-	struct hwrm_vnic_alloc_input req = {0};
-	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_alloc_output *resp;
+	struct hwrm_vnic_alloc_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
+	if (rc)
+		return rc;
 
 	if (bp->flags & BNXT_FLAG_CHIP_P5)
 		goto vnic_no_ring_grps;
@@ -5319,22 +5365,20 @@  static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
 	if (vnic_id == 0)
-		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
-
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 {
-	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_vnic_qcaps_input req = {0};
+	struct hwrm_vnic_qcaps_output *resp;
+	struct hwrm_vnic_qcaps_input *req;
 	int rc;
 
 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
@@ -5342,9 +5386,12 @@  static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 	if (bp->hwrm_spec_code < 0x10600)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
+	if (rc)
+		return rc;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		u32 flags = le32_to_cpu(resp->flags);
 
@@ -5370,92 +5417,96 @@  static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
 		}
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
 {
+	struct hwrm_ring_grp_alloc_output *resp;
+	struct hwrm_ring_grp_alloc_input *req;
+	int rc;
 	u16 i;
-	u32 rc = 0;
 
 	if (bp->flags & BNXT_FLAG_CHIP_P5)
 		return 0;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
+	if (rc)
+		return rc;
+
+	resp = hwrm_req_hold(bp, req);
 	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct hwrm_ring_grp_alloc_input req = {0};
-		struct hwrm_ring_grp_alloc_output *resp =
-					bp->hwrm_cmd_resp_addr;
 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
 
-		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
+		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
+		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
 
-		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
-		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
-		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
-		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
+		rc = hwrm_req_send(bp, req);
 
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
 		if (rc)
 			break;
 
 		bp->grp_info[grp_idx].fw_grp_id =
 			le32_to_cpu(resp->ring_group_id);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
 {
+	struct hwrm_ring_grp_free_input *req;
 	u16 i;
-	struct hwrm_ring_grp_free_input req = {0};
 
 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
 		return;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
+		return;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	hwrm_req_hold(bp, req);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
 			continue;
-		req.ring_group_id =
+		req->ring_group_id =
 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
 
-		_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		hwrm_req_send(bp, req);
 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 }
 
 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 				    struct bnxt_ring_struct *ring,
 				    u32 ring_type, u32 map_index)
 {
-	int rc = 0, err = 0;
-	struct hwrm_ring_alloc_input req = {0};
-	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_ring_alloc_output *resp;
+	struct hwrm_ring_alloc_input *req;
 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
 	struct bnxt_ring_grp_info *grp_info;
+	int rc, err = 0;
 	u16 ring_id;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
+	if (rc)
+		goto exit;
 
-	req.enables = 0;
+	req->enables = 0;
 	if (rmem->nr_pages > 1) {
-		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
+		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
 		/* Page size is in log2 units */
-		req.page_size = BNXT_PAGE_SHIFT;
-		req.page_tbl_depth = 1;
+		req->page_size = BNXT_PAGE_SHIFT;
+		req->page_tbl_depth = 1;
 	} else {
-		req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
+		req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
 	}
-	req.fbo = 0;
+	req->fbo = 0;
 	/* Association of ring index with doorbell index and MSIX number */
-	req.logical_id = cpu_to_le16(map_index);
+	req->logical_id = cpu_to_le16(map_index);
 
 	switch (ring_type) {
 	case HWRM_RING_ALLOC_TX: {
@@ -5463,67 +5514,67 @@  static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 
 		txr = container_of(ring, struct bnxt_tx_ring_info,
 				   tx_ring_struct);
-		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
 		/* Association of transmit ring with completion ring */
 		grp_info = &bp->grp_info[ring->grp_idx];
-		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
-		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
-		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
-		req.queue_id = cpu_to_le16(ring->queue_id);
+		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
+		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
+		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+		req->queue_id = cpu_to_le16(ring->queue_id);
 		break;
 	}
 	case HWRM_RING_ALLOC_RX:
-		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
-		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+		req->length = cpu_to_le32(bp->rx_ring_mask + 1);
 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
 			u16 flags = 0;
 
 			/* Association of rx ring with stats context */
 			grp_info = &bp->grp_info[ring->grp_idx];
-			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
-			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
-			req.enables |= cpu_to_le32(
+			req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+			req->enables |= cpu_to_le32(
 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
 			if (NET_IP_ALIGN == 2)
 				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
-			req.flags = cpu_to_le16(flags);
+			req->flags = cpu_to_le16(flags);
 		}
 		break;
 	case HWRM_RING_ALLOC_AGG:
 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
-			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
 			/* Association of agg ring with rx ring */
 			grp_info = &bp->grp_info[ring->grp_idx];
-			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
-			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
-			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
-			req.enables |= cpu_to_le32(
+			req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+			req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+			req->enables |= cpu_to_le32(
 				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
 		} else {
-			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
 		}
-		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+		req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
 		break;
 	case HWRM_RING_ALLOC_CMPL:
-		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
-		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
 			/* Association of cp ring with nq */
 			grp_info = &bp->grp_info[map_index];
-			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
-			req.cq_handle = cpu_to_le64(ring->handle);
-			req.enables |= cpu_to_le32(
+			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+			req->cq_handle = cpu_to_le64(ring->handle);
+			req->enables |= cpu_to_le32(
 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
 		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
-			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
 		}
 		break;
 	case HWRM_RING_ALLOC_NQ:
-		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
-		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
 		if (bp->flags & BNXT_FLAG_USING_MSIX)
-			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
 		break;
 	default:
 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
@@ -5531,12 +5582,13 @@  static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 		return -1;
 	}
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	err = le16_to_cpu(resp->error_code);
 	ring_id = le16_to_cpu(resp->ring_id);
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 
+exit:
 	if (rc || err) {
 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
 			   ring_type, rc, err);
@@ -5551,23 +5603,28 @@  static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
 	int rc;
 
 	if (BNXT_PF(bp)) {
-		struct hwrm_func_cfg_input req = {0};
+		struct hwrm_func_cfg_input *req;
+
+		rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+		if (rc)
+			return rc;
 
-		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-		req.fid = cpu_to_le16(0xffff);
-		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
-		req.async_event_cr = cpu_to_le16(idx);
-		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		req->fid = cpu_to_le16(0xffff);
+		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+		req->async_event_cr = cpu_to_le16(idx);
+		return hwrm_req_send(bp, req);
 	} else {
-		struct hwrm_func_vf_cfg_input req = {0};
+		struct hwrm_func_vf_cfg_input *req;
 
-		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
-		req.enables =
+		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
+		if (rc)
+			return rc;
+
+		req->enables =
 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
-		req.async_event_cr = cpu_to_le16(idx);
-		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		req->async_event_cr = cpu_to_le16(idx);
+		return hwrm_req_send(bp, req);
 	}
-	return rc;
 }
 
 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
@@ -5738,23 +5795,27 @@  static int hwrm_ring_free_send_msg(struct bnxt *bp,
 				   struct bnxt_ring_struct *ring,
 				   u32 ring_type, int cmpl_ring_id)
 {
+	struct hwrm_ring_free_output *resp;
+	struct hwrm_ring_free_input *req;
+	u16 error_code = 0;
 	int rc;
-	struct hwrm_ring_free_input req = {0};
-	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
-	u16 error_code;
 
 	if (BNXT_NO_FW_ACCESS(bp))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
-	req.ring_type = ring_type;
-	req.ring_id = cpu_to_le16(ring->fw_ring_id);
+	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
+	if (rc)
+		goto exit;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	error_code = le16_to_cpu(resp->error_code);
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+	req->ring_type = ring_type;
+	req->ring_id = cpu_to_le16(ring->fw_ring_id);
 
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	error_code = le16_to_cpu(resp->error_code);
+	hwrm_req_drop(bp, req);
+exit:
 	if (rc || error_code) {
 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
 			   ring_type, rc, error_code);
@@ -5869,20 +5930,23 @@  static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 
 static int bnxt_hwrm_get_rings(struct bnxt *bp)
 {
-	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp;
+	struct hwrm_func_qcfg_input *req;
 	int rc;
 
 	if (bp->hwrm_spec_code < 0x10601)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(0xffff);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc) {
-		mutex_unlock(&bp->hwrm_cmd_lock);
+		hwrm_req_drop(bp, req);
 		return rc;
 	}
 
@@ -5916,39 +5980,41 @@  static int bnxt_hwrm_get_rings(struct bnxt *bp)
 		hw_resc->resv_cp_rings = cp;
 		hw_resc->resv_stat_ctxs = stats;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return 0;
 }
 
-/* Caller must hold bp->hwrm_cmd_lock */
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
 {
-	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp;
+	struct hwrm_func_qcfg_input *req;
 	int rc;
 
 	if (bp->hwrm_spec_code < 0x10601)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
-	req.fid = cpu_to_le16(fid);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->fid = cpu_to_le16(fid);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
 
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static bool bnxt_rfs_supported(struct bnxt *bp);
 
-static void
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
-			     int tx_rings, int rx_rings, int ring_grps,
-			     int cp_rings, int stats, int vnics)
+static struct hwrm_func_cfg_input *
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+			     int ring_grps, int cp_rings, int stats, int vnics)
 {
+	struct hwrm_func_cfg_input *req;
 	u32 enables = 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
+	if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
+		return NULL;
+
 	req->fid = cpu_to_le16(0xffff);
 	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
 	req->num_tx_rings = cpu_to_le16(tx_rings);
@@ -5989,17 +6055,19 @@  __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
 		req->num_vnics = cpu_to_le16(vnics);
 	}
 	req->enables = cpu_to_le32(enables);
+	return req;
 }
 
-static void
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
-			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
-			     int rx_rings, int ring_grps, int cp_rings,
-			     int stats, int vnics)
+static struct hwrm_func_vf_cfg_input *
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+			     int ring_grps, int cp_rings, int stats, int vnics)
 {
+	struct hwrm_func_vf_cfg_input *req;
 	u32 enables = 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
+	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
+		return NULL;
+
 	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
 	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
 			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -6031,21 +6099,27 @@  __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
 	req->num_vnics = cpu_to_le16(vnics);
 
 	req->enables = cpu_to_le32(enables);
+	return req;
 }
 
 static int
 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 			   int ring_grps, int cp_rings, int stats, int vnics)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_input *req;
 	int rc;
 
-	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
-				     cp_rings, stats, vnics);
-	if (!req.enables)
+	req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
+					   cp_rings, stats, vnics);
+	if (!req)
+		return -ENOMEM;
+
+	if (!req->enables) {
+		hwrm_req_drop(bp, req);
 		return 0;
+	}
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		return rc;
 
@@ -6059,7 +6133,7 @@  static int
 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 			   int ring_grps, int cp_rings, int stats, int vnics)
 {
-	struct hwrm_func_vf_cfg_input req = {0};
+	struct hwrm_func_vf_cfg_input *req;
 	int rc;
 
 	if (!BNXT_NEW_RM(bp)) {
@@ -6067,9 +6141,12 @@  bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 		return 0;
 	}
 
-	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
-				     cp_rings, stats, vnics);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+					   cp_rings, stats, vnics);
+	if (!req)
+		return -ENOMEM;
+
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		return rc;
 
@@ -6270,14 +6347,14 @@  static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 				    int ring_grps, int cp_rings, int stats,
 				    int vnics)
 {
-	struct hwrm_func_vf_cfg_input req = {0};
+	struct hwrm_func_vf_cfg_input *req;
 	u32 flags;
 
 	if (!BNXT_NEW_RM(bp))
 		return 0;
 
-	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
-				     cp_rings, stats, vnics);
+	req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+					   cp_rings, stats, vnics);
 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -6287,20 +6364,19 @@  static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
 
-	req.flags = cpu_to_le32(flags);
-	return hwrm_send_message_silent(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+	req->flags = cpu_to_le32(flags);
+	return hwrm_req_send_silent(bp, req);
 }
 
 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 				    int ring_grps, int cp_rings, int stats,
 				    int vnics)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_input *req;
 	u32 flags;
 
-	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
-				     cp_rings, stats, vnics);
+	req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
+					   cp_rings, stats, vnics);
 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
 	if (BNXT_NEW_RM(bp)) {
 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -6314,9 +6390,8 @@  static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
 	}
 
-	req.flags = cpu_to_le32(flags);
-	return hwrm_send_message_silent(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+	req->flags = cpu_to_le32(flags);
+	return hwrm_req_send_silent(bp, req);
 }
 
 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6337,9 +6412,9 @@  static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 
 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
 {
-	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
-	struct hwrm_ring_aggint_qcaps_input req = {0};
+	struct hwrm_ring_aggint_qcaps_output *resp;
+	struct hwrm_ring_aggint_qcaps_input *req;
 	int rc;
 
 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
@@ -6355,9 +6430,11 @@  static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
 	if (bp->hwrm_spec_code < 0x10902)
 		return;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
+		return;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
 	if (!rc) {
 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
@@ -6377,7 +6454,7 @@  static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 }
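
The qcaps conversion above is the canonical read-response shape in the new API: hwrm_req_hold() takes the place of acquiring bp->hwrm_cmd_lock and returns the response buffer, which stays valid until hwrm_req_drop(). A minimal sketch, assuming only the hwrm_req_*() calls that appear in this patch:

	static void example_query_aggint_qcaps(struct bnxt *bp)
	{
		struct hwrm_ring_aggint_qcaps_output *resp;
		struct hwrm_ring_aggint_qcaps_input *req;

		if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
			return;

		resp = hwrm_req_hold(bp, req);	/* resp valid until drop */
		if (!hwrm_req_send_silent(bp, req))
			bp->coal_cap.cmpl_params =
				le32_to_cpu(resp->cmpl_params);
		hwrm_req_drop(bp, req);		/* releases req and resp */
	}
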
 
 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
@@ -6445,37 +6522,40 @@  static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
 }
 
-/* Caller holds bp->hwrm_cmd_lock */
 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
 				   struct bnxt_coal *hw_coal)
 {
-	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
 	u32 nq_params = coal_cap->nq_params;
 	u16 tmr;
+	int rc;
 
 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
-			       -1, -1);
-	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
-	req.flags =
+	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+	if (rc)
+		return rc;
+
+	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
+	req->flags =
 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
 
 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
-	req.int_lat_tmr_min = cpu_to_le16(tmr);
-	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
-	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->int_lat_tmr_min = cpu_to_le16(tmr);
+	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+	return hwrm_req_send(bp, req);
 }
 
 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
 {
-	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	struct bnxt_coal coal;
+	int rc;
 
 	/* Tick values in micro seconds.
 	 * 1 coal_buf x bufs_per_record = 1 completion record.
@@ -6488,48 +6568,53 @@  int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
 	if (!bnapi->rx_ring)
 		return -ENODEV;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
-			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
+	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
 
-	req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
+	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
 
-	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
-				 HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req_rx);
 }
 
 int bnxt_hwrm_set_coal(struct bnxt *bp)
 {
-	int i, rc = 0;
-	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
-							   req_tx = {0}, *req;
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
+							   *req;
+	int i, rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
-			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
-	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
-			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+	if (rc)
+		return rc;
+
+	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+	if (rc) {
+		hwrm_req_drop(bp, req_rx);
+		return rc;
+	}
 
-	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
-	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
+	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
+	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	hwrm_req_hold(bp, req_rx);
+	hwrm_req_hold(bp, req_tx);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_coal *hw_coal;
 		u16 ring_id;
 
-		req = &req_rx;
+		req = req_rx;
 		if (!bnapi->rx_ring) {
 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
-			req = &req_tx;
+			req = req_tx;
 		} else {
 			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
 		}
 		req->ring_id = cpu_to_le16(ring_id);
 
-		rc = _hwrm_send_message(bp, req, sizeof(*req),
-					HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
 			break;
 
@@ -6537,11 +6622,10 @@  int bnxt_hwrm_set_coal(struct bnxt *bp)
 			continue;
 
 		if (bnapi->rx_ring && bnapi->tx_ring) {
-			req = &req_tx;
+			req = req_tx;
 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
 			req->ring_id = cpu_to_le16(ring_id);
-			rc = _hwrm_send_message(bp, req, sizeof(*req),
-						HWRM_CMD_TIMEOUT);
+			rc = hwrm_req_send(bp, req);
 			if (rc)
 				break;
 		}
@@ -6551,14 +6635,15 @@  int bnxt_hwrm_set_coal(struct bnxt *bp)
 			hw_coal = &bp->tx_coal;
 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req_rx);
+	hwrm_req_drop(bp, req_tx);
 	return rc;
 }
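
bnxt_hwrm_set_coal() above also shows a property the old single response buffer and global mutex could not offer: several requests can be in flight at once, and a held request can be re-sent any number of times with updated fields, with each hold balanced by its own drop. The core of that loop, reduced to a sketch:

	hwrm_req_hold(bp, req_rx);
	hwrm_req_hold(bp, req_tx);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		req = bnapi->rx_ring ? req_rx : req_tx;
		req->ring_id = cpu_to_le16(bnapi->rx_ring ?
				bnxt_cp_ring_for_rx(bp, bnapi->rx_ring) :
				bnxt_cp_ring_for_tx(bp, bnapi->tx_ring));
		rc = hwrm_req_send(bp, req);	/* held, so not consumed */
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req_rx);
	hwrm_req_drop(bp, req_tx);
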
 
 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 {
-	struct hwrm_stat_ctx_clr_stats_input req0 = {0};
-	struct hwrm_stat_ctx_free_input req = {0};
+	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
+	struct hwrm_stat_ctx_free_input *req;
 	int i;
 
 	if (!bp->bnapi)
@@ -6567,53 +6652,60 @@  static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
 		return;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
+	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
+		return;
+	if (BNXT_FW_MAJ(bp) <= 20) {
+		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
+			hwrm_req_drop(bp, req);
+			return;
+		}
+		hwrm_req_hold(bp, req0);
+	}
+	hwrm_req_hold(bp, req);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 
 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
-			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-			if (BNXT_FW_MAJ(bp) <= 20) {
-				req0.stat_ctx_id = req.stat_ctx_id;
-				_hwrm_send_message(bp, &req0, sizeof(req0),
-						   HWRM_CMD_TIMEOUT);
+			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+			if (req0) {
+				req0->stat_ctx_id = req->stat_ctx_id;
+				hwrm_req_send(bp, req0);
 			}
-			_hwrm_send_message(bp, &req, sizeof(req),
-					   HWRM_CMD_TIMEOUT);
+			hwrm_req_send(bp, req);
 
 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
 		}
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
+	if (req0)
+		hwrm_req_drop(bp, req0);
 }
 
 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
 {
-	int rc = 0, i;
-	struct hwrm_stat_ctx_alloc_input req = {0};
-	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_stat_ctx_alloc_output *resp;
+	struct hwrm_stat_ctx_alloc_input *req;
+	int rc, i;
 
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
+	if (rc)
+		return rc;
 
-	req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
-	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
+	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
+	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	resp = hwrm_req_hold(bp, req);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 
-		req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
+		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
 
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
 			break;
 
@@ -6621,22 +6713,25 @@  static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
 
 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 {
-	struct hwrm_func_qcfg_input req = {0};
-	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcfg_output *resp;
+	struct hwrm_func_qcfg_input *req;
 	u32 min_db_offset = 0;
 	u16 flags;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(0xffff);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		goto func_qcfg_exit;
 
@@ -6696,7 +6791,7 @@  static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 		bp->db_size = pci_resource_len(bp->pdev, 2);
 
 func_qcfg_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -6735,17 +6830,19 @@  static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
 
 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 {
-	struct hwrm_func_backing_store_qcaps_input req = {0};
-	struct hwrm_func_backing_store_qcaps_output *resp =
-		bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_backing_store_qcaps_output *resp;
+	struct hwrm_func_backing_store_qcaps_input *req;
 	int rc;
 
 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
+	if (rc)
+		return rc;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
 	if (!rc) {
 		struct bnxt_ctx_pg_info *ctx_pg;
 		struct bnxt_ctx_mem_info *ctx;
@@ -6810,7 +6907,7 @@  static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 		rc = 0;
 	}
 ctx_err:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -6841,15 +6938,17 @@  static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
 
 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 {
-	struct hwrm_func_backing_store_cfg_input req = {0};
+	struct hwrm_func_backing_store_cfg_input *req;
 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
 	struct bnxt_ctx_pg_info *ctx_pg;
-	u32 req_len = sizeof(req);
+	void **__req = (void **)&req;
+	u32 req_len = sizeof(*req);
 	__le32 *num_entries;
 	__le64 *pg_dir;
 	u32 flags = 0;
 	u8 *pg_attr;
 	u32 ena;
+	int rc;
 	int i;
 
 	if (!ctx)
@@ -6857,90 +6956,93 @@  static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 
 	if (req_len > bp->hwrm_max_ext_req_len)
 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
-	req.enables = cpu_to_le32(enables);
+	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
+	if (rc)
+		return rc;
 
+	req->enables = cpu_to_le32(enables);
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
 		ctx_pg = &ctx->qp_mem;
-		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
-		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
-		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
-		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
+		req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
+		req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
+		req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
-				      &req.qpc_pg_size_qpc_lvl,
-				      &req.qpc_page_dir);
+				      &req->qpc_pg_size_qpc_lvl,
+				      &req->qpc_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
 		ctx_pg = &ctx->srq_mem;
-		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
-		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
-		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
+		req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
+		req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
-				      &req.srq_pg_size_srq_lvl,
-				      &req.srq_page_dir);
+				      &req->srq_pg_size_srq_lvl,
+				      &req->srq_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
 		ctx_pg = &ctx->cq_mem;
-		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
-		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
-		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
-		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
-				      &req.cq_page_dir);
+		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
+		req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
+		req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req->cq_pg_size_cq_lvl,
+				      &req->cq_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
 		ctx_pg = &ctx->vnic_mem;
-		req.vnic_num_vnic_entries =
+		req->vnic_num_vnic_entries =
 			cpu_to_le16(ctx->vnic_max_vnic_entries);
-		req.vnic_num_ring_table_entries =
+		req->vnic_num_ring_table_entries =
 			cpu_to_le16(ctx->vnic_max_ring_table_entries);
-		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+		req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
-				      &req.vnic_pg_size_vnic_lvl,
-				      &req.vnic_page_dir);
+				      &req->vnic_pg_size_vnic_lvl,
+				      &req->vnic_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
 		ctx_pg = &ctx->stat_mem;
-		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
-		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+		req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
+		req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
-				      &req.stat_pg_size_stat_lvl,
-				      &req.stat_page_dir);
+				      &req->stat_pg_size_stat_lvl,
+				      &req->stat_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
 		ctx_pg = &ctx->mrav_mem;
-		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
+		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
 		if (ctx->mrav_num_entries_units)
 			flags |=
 			FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
-		req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+		req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
-				      &req.mrav_pg_size_mrav_lvl,
-				      &req.mrav_page_dir);
+				      &req->mrav_pg_size_mrav_lvl,
+				      &req->mrav_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
 		ctx_pg = &ctx->tim_mem;
-		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
-		req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
+		req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
-				      &req.tim_pg_size_tim_lvl,
-				      &req.tim_page_dir);
+				      &req->tim_pg_size_tim_lvl,
+				      &req->tim_page_dir);
 	}
-	for (i = 0, num_entries = &req.tqm_sp_num_entries,
-	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
-	     pg_dir = &req.tqm_sp_page_dir,
+	for (i = 0, num_entries = &req->tqm_sp_num_entries,
+	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
+	     pg_dir = &req->tqm_sp_page_dir,
 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
 	     i < BNXT_MAX_TQM_RINGS;
 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
 		if (!(enables & ena))
 			continue;
 
-		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
+		req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
 		ctx_pg = ctx->tqm_mem[i];
 		*num_entries = cpu_to_le32(ctx_pg->entries);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
 	}
-	req.flags = cpu_to_le32(flags);
-	return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
+	req->flags = cpu_to_le32(flags);
+	return hwrm_req_send(bp, req);
 }
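
The backing-store conversion above is the one place in this excerpt that needs a request shorter than the full structure (older firmware only accepts the legacy length), so it uses the lower-level __hwrm_req_init(), which takes an explicit length, where the hwrm_req_init() used elsewhere in this patch evidently defaults to sizeof(*req). The essential shape:

	struct hwrm_func_backing_store_cfg_input *req;
	void **__req = (void **)&req;
	u32 req_len = sizeof(*req);
	int rc;

	if (req_len > bp->hwrm_max_ext_req_len)
		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
	if (rc)
		return rc;
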
 
 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -7220,17 +7322,18 @@  static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 
 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
 {
-	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_resource_qcaps_input req = {0};
+	struct hwrm_func_resource_qcaps_output *resp;
+	struct hwrm_func_resource_qcaps_input *req;
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
-				       HWRM_CMD_TIMEOUT);
+	req->fid = cpu_to_le16(0xffff);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
 	if (rc)
 		goto hwrm_func_resc_qcaps_exit;
 
@@ -7271,15 +7374,14 @@  int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
 	}
 hwrm_func_resc_qcaps_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
-/* bp->hwrm_cmd_lock already held. */
 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 {
-	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_port_mac_ptp_qcfg_input req = {0};
+	struct hwrm_port_mac_ptp_qcfg_output *resp;
+	struct hwrm_port_mac_ptp_qcfg_input *req;
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 	u8 flags;
 	int rc;
@@ -7289,21 +7391,27 @@  static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 		goto no_ptp;
 	}
 
-	req.port_id = cpu_to_le16(bp->pf.port_id);
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
 	if (rc)
 		goto no_ptp;
 
+	req->port_id = cpu_to_le16(bp->pf.port_id);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (rc)
+		goto exit;
+
 	flags = resp->flags;
 	if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
 		rc = -ENODEV;
-		goto no_ptp;
+		goto exit;
 	}
 	if (!ptp) {
 		ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
-		if (!ptp)
-			return -ENOMEM;
+		if (!ptp) {
+			rc = -ENOMEM;
+			goto exit;
+		}
 		ptp->bp = bp;
 		bp->ptp_cfg = ptp;
 	}
@@ -7315,14 +7423,16 @@  static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
 	} else {
 		rc = -ENODEV;
-		goto no_ptp;
+		goto exit;
 	}
 	rc = bnxt_ptp_init(bp);
+	if (rc)
+		netdev_warn(bp->dev, "PTP initialization failed.\n");
+exit:
+	hwrm_req_drop(bp, req);
 	if (!rc)
 		return 0;
 
-	netdev_warn(bp->dev, "PTP initialization failed.\n");
-
 no_ptp:
 	bnxt_ptp_clear(bp);
 	kfree(ptp);
@@ -7332,17 +7442,19 @@  static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 
 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
-	int rc = 0;
-	struct hwrm_func_qcaps_input req = {0};
-	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcaps_output *resp;
+	struct hwrm_func_qcaps_input *req;
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	u32 flags, flags_ext;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->fid = cpu_to_le16(0xffff);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		goto hwrm_func_qcaps_exit;
 
@@ -7420,7 +7532,7 @@  static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	}
 
 hwrm_func_qcaps_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -7451,19 +7563,20 @@  static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 
 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
 {
-	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
-	int rc = 0;
+	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
 	u32 flags;
+	int rc;
 
 	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
 		return 0;
 
-	resp = bp->hwrm_cmd_resp_addr;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		goto hwrm_cfa_adv_qcaps_exit;
 
@@ -7473,7 +7586,7 @@  static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
 
 hwrm_cfa_adv_qcaps_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -7616,17 +7729,20 @@  static int bnxt_map_fw_health_regs(struct bnxt *bp)
 
 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 {
-	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_fw_health *fw_health = bp->fw_health;
-	struct hwrm_error_recovery_qcfg_input req = {0};
+	struct hwrm_error_recovery_qcfg_output *resp;
+	struct hwrm_error_recovery_qcfg_input *req;
 	int rc, i;
 
 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
+	if (rc)
+		return rc;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		goto err_recovery_out;
 	fw_health->flags = le32_to_cpu(resp->flags);
@@ -7668,7 +7784,7 @@  static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 			resp->delay_after_reset[i];
 	}
 err_recovery_out:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	if (!rc)
 		rc = bnxt_map_fw_health_regs(bp);
 	if (rc)
@@ -7678,12 +7794,16 @@  static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 
 static int bnxt_hwrm_func_reset(struct bnxt *bp)
 {
-	struct hwrm_func_reset_input req = {0};
+	struct hwrm_func_reset_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
-	req.enables = 0;
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
+	if (rc)
+		return rc;
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+	req->enables = 0;
+	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
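
Where the old code passed a non-default timeout as the last argument of hwrm_send_message(), the timeout now travels with the request: hwrm_req_timeout() is called once before sending, as bnxt_hwrm_func_reset() above does. A sketch of the shape:

	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);	/* longer than default */
	return hwrm_req_send(bp, req);
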
 
 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
@@ -7698,16 +7818,18 @@  static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
 
 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 {
-	int rc = 0;
-	struct hwrm_queue_qportcfg_input req = {0};
-	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_queue_qportcfg_output *resp;
+	struct hwrm_queue_qportcfg_input *req;
 	u8 i, j, *qptr;
 	bool no_rdma;
+	int rc = 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		goto qportcfg_exit;
 
@@ -7741,40 +7863,48 @@  static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 		bp->max_lltc = bp->max_tc;
 
 qportcfg_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_poll(struct bnxt *bp)
 {
-	struct hwrm_ver_get_input req = {0};
+	struct hwrm_ver_get_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
-	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
-	req.hwrm_intf_min = HWRM_VERSION_MINOR;
-	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
+	if (rc)
+		return rc;
+
+	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+	req->hwrm_intf_min = HWRM_VERSION_MINOR;
+	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
 
-	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
+	rc = hwrm_req_send(bp, req);
 	return rc;
 }
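
Behavior that used to be selected by choosing among send variants can also be attached to the request with hwrm_req_flags(); bnxt_hwrm_poll() above combines BNXT_HWRM_CTX_SILENT (suppressing error logging, as hwrm_req_send_silent() does) with BNXT_HWRM_FULL_WAIT. As a fragment:

	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
	rc = hwrm_req_send(bp, req);	/* silent send with full wait */
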
 
 static int bnxt_hwrm_ver_get(struct bnxt *bp)
 {
-	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_ver_get_input req = {0};
+	struct hwrm_ver_get_output *resp;
+	struct hwrm_ver_get_input *req;
 	u16 fw_maj, fw_min, fw_bld, fw_rsv;
 	u32 dev_caps_cfg, hwrm_ver;
 	int rc, len;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
+	if (rc)
+		return rc;
+
+	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
-	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
-	req.hwrm_intf_min = HWRM_VERSION_MINOR;
-	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+	req->hwrm_intf_min = HWRM_VERSION_MINOR;
+	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		goto hwrm_ver_get_exit;
 
@@ -7866,29 +7996,33 @@  static int bnxt_hwrm_ver_get(struct bnxt *bp)
 		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
 
 hwrm_ver_get_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
 {
-	struct hwrm_fw_set_time_input req = {0};
+	struct hwrm_fw_set_time_input *req;
 	struct tm tm;
 	time64_t now = ktime_get_real_seconds();
+	int rc;
 
 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
 	    bp->hwrm_spec_code < 0x10400)
 		return -EOPNOTSUPP;
 
 	time64_to_tm(now, 0, &tm);
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
-	req.year = cpu_to_le16(1900 + tm.tm_year);
-	req.month = 1 + tm.tm_mon;
-	req.day = tm.tm_mday;
-	req.hour = tm.tm_hour;
-	req.minute = tm.tm_min;
-	req.second = tm.tm_sec;
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
+	if (rc)
+		return rc;
+
+	req->year = cpu_to_le16(1900 + tm.tm_year);
+	req->month = 1 + tm.tm_mon;
+	req->day = tm.tm_mday;
+	req->hour = tm.tm_hour;
+	req->minute = tm.tm_min;
+	req->second = tm.tm_sec;
+	return hwrm_req_send(bp, req);
 }
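
bnxt_hwrm_fw_set_time() above is the simplest fire-and-forget shape: init, fill in fields, send. No hold/drop pair is needed because the response is never read; an un-held request is released by hwrm_req_send() itself. A minimal sketch (the year value is illustrative):

	static int example_set_time(struct bnxt *bp, u16 year)
	{
		struct hwrm_fw_set_time_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
		if (rc)
			return rc;

		req->year = cpu_to_le16(year);
		return hwrm_req_send(bp, req);	/* un-held: freed by send */
	}
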
 
 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
@@ -7976,8 +8110,9 @@  static void bnxt_accumulate_all_stats(struct bnxt *bp)
 
 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
 {
+	struct hwrm_port_qstats_input *req;
 	struct bnxt_pf_info *pf = &bp->pf;
-	struct hwrm_port_qstats_input req = {0};
+	int rc;
 
 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
 		return 0;
@@ -7985,20 +8120,24 @@  static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
 		return -EOPNOTSUPP;
 
-	req.flags = flags;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
-	req.port_id = cpu_to_le16(pf->port_id);
-	req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
+	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
+	if (rc)
+		return rc;
+
+	req->flags = flags;
+	req->port_id = cpu_to_le16(pf->port_id);
+	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
 					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
-	req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
 {
-	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
-	struct hwrm_port_qstats_ext_input req = {0};
+	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
+	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
+	struct hwrm_port_qstats_ext_output *resp_qs;
+	struct hwrm_port_qstats_ext_input *req_qs;
 	struct bnxt_pf_info *pf = &bp->pf;
 	u32 tx_stat_size;
 	int rc;
@@ -8009,46 +8148,53 @@  static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
 		return -EOPNOTSUPP;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
-	req.flags = flags;
-	req.port_id = cpu_to_le16(pf->port_id);
-	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
-	req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
+	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
+	if (rc)
+		return rc;
+
+	req_qs->flags = flags;
+	req_qs->port_id = cpu_to_le16(pf->port_id);
+	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
 	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
 		       sizeof(struct tx_port_stats_ext) : 0;
-	req.tx_stat_size = cpu_to_le16(tx_stat_size);
-	req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
+	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
+	resp_qs = hwrm_req_hold(bp, req_qs);
+	rc = hwrm_req_send(bp, req_qs);
 	if (!rc) {
-		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
+		bp->fw_rx_stats_ext_size =
+			le16_to_cpu(resp_qs->rx_stat_size) / 8;
 		bp->fw_tx_stats_ext_size = tx_stat_size ?
-			le16_to_cpu(resp->tx_stat_size) / 8 : 0;
+			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
 	} else {
 		bp->fw_rx_stats_ext_size = 0;
 		bp->fw_tx_stats_ext_size = 0;
 	}
+	hwrm_req_drop(bp, req_qs);
+
 	if (flags)
-		goto qstats_done;
+		return rc;
 
 	if (bp->fw_tx_stats_ext_size <=
 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
-		mutex_unlock(&bp->hwrm_cmd_lock);
 		bp->pri2cos_valid = 0;
 		return rc;
 	}
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
-	req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
+	if (rc)
+		return rc;
+
+	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
 
-	rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
+	resp_qc = hwrm_req_hold(bp, req_qc);
+	rc = hwrm_req_send(bp, req_qc);
 	if (!rc) {
-		struct hwrm_queue_pri2cos_qcfg_output *resp2;
 		u8 *pri2cos;
 		int i, j;
 
-		resp2 = bp->hwrm_cmd_resp_addr;
-		pri2cos = &resp2->pri0_cos_queue_id;
+		pri2cos = &resp_qc->pri0_cos_queue_id;
 		for (i = 0; i < 8; i++) {
 			u8 queue_id = pri2cos[i];
 			u8 queue_idx;
@@ -8057,17 +8203,18 @@  static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
 			queue_idx = queue_id % 10;
 			if (queue_idx > BNXT_MAX_QUEUE) {
 				bp->pri2cos_valid = false;
-				goto qstats_done;
+				hwrm_req_drop(bp, req_qc);
+				return rc;
 			}
 			for (j = 0; j < bp->max_q; j++) {
 				if (bp->q_ids[j] == queue_id)
 					bp->pri2cos_idx[i] = queue_idx;
 			}
 		}
-		bp->pri2cos_valid = 1;
+		bp->pri2cos_valid = true;
 	}
-qstats_done:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req_qc);
+
 	return rc;
 }
 
@@ -8142,35 +8289,46 @@  static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
 
 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_input *req;
+	u8 evb_mode;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
 	if (br_mode == BRIDGE_MODE_VEB)
-		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
+		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
 	else if (br_mode == BRIDGE_MODE_VEPA)
-		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
+		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
 	else
 		return -EINVAL;
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(0xffff);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
+	req->evb_mode = evb_mode;
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_input *req;
+	int rc;
 
 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
-	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(0xffff);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
 	if (size == 128)
-		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
+		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
@@ -9118,18 +9276,20 @@  static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
 
 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 {
-	int rc = 0;
-	struct hwrm_port_phy_qcaps_input req = {0};
-	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_link_info *link_info = &bp->link_info;
+	struct hwrm_port_phy_qcaps_output *resp;
+	struct hwrm_port_phy_qcaps_input *req;
+	int rc = 0;
 
 	if (bp->hwrm_spec_code < 0x10201)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc)
 		goto hwrm_phy_qcaps_exit;
 
@@ -9167,7 +9327,7 @@  static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 	bp->port_count = resp->port_cnt;
 
 hwrm_phy_qcaps_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -9180,19 +9340,21 @@  static bool bnxt_support_dropped(u16 advertising, u16 supported)
 
 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 {
-	int rc = 0;
 	struct bnxt_link_info *link_info = &bp->link_info;
-	struct hwrm_port_phy_qcfg_input req = {0};
-	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_port_phy_qcfg_output *resp;
+	struct hwrm_port_phy_qcfg_input *req;
 	u8 link_up = link_info->link_up;
 	bool support_changed = false;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc) {
-		mutex_unlock(&bp->hwrm_cmd_lock);
+		hwrm_req_drop(bp, req);
 		return rc;
 	}
 
@@ -9287,7 +9449,7 @@  int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 		/* always link down if not required to update link state */
 		link_info->link_up = 0;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 
 	if (!BNXT_PHY_CFG_ABLE(bp))
 		return 0;
@@ -9397,18 +9559,20 @@  static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
 
 int bnxt_hwrm_set_pause(struct bnxt *bp)
 {
-	struct hwrm_port_phy_cfg_input req = {0};
+	struct hwrm_port_phy_cfg_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
-	bnxt_hwrm_set_pause_common(bp, &req);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+	if (rc)
+		return rc;
+
+	bnxt_hwrm_set_pause_common(bp, req);
 
 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
 	    bp->link_info.force_link_chng)
-		bnxt_hwrm_set_link_common(bp, &req);
+		bnxt_hwrm_set_link_common(bp, req);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_send(bp, req);
 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
 		/* since changing of pause setting doesn't trigger any link
 		 * change event, the driver needs to update the current pause
@@ -9421,7 +9585,6 @@  int bnxt_hwrm_set_pause(struct bnxt *bp)
 			bnxt_report_link(bp);
 	}
 	bp->link_info.force_link_chng = false;
-	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
 }
 
@@ -9450,22 +9613,27 @@  static void bnxt_hwrm_set_eee(struct bnxt *bp,
 
 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
 {
-	struct hwrm_port_phy_cfg_input req = {0};
+	struct hwrm_port_phy_cfg_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
 	if (set_pause)
-		bnxt_hwrm_set_pause_common(bp, &req);
+		bnxt_hwrm_set_pause_common(bp, req);
 
-	bnxt_hwrm_set_link_common(bp, &req);
+	bnxt_hwrm_set_link_common(bp, req);
 
 	if (set_eee)
-		bnxt_hwrm_set_eee(bp, &req);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		bnxt_hwrm_set_eee(bp, req);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
 {
-	struct hwrm_port_phy_cfg_input req = {0};
+	struct hwrm_port_phy_cfg_input *req;
+	int rc;
 
 	if (!BNXT_SINGLE_PF(bp))
 		return 0;
@@ -9474,9 +9642,12 @@  static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
 	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
-	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_fw_init_one(struct bnxt *bp);
@@ -9502,7 +9673,6 @@  static int bnxt_try_recover_fw(struct bnxt *bp)
 		int retry = 0, rc;
 		u32 sts;
 
-		mutex_lock(&bp->hwrm_cmd_lock);
 		do {
 			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
 			rc = bnxt_hwrm_poll(bp);
@@ -9511,7 +9681,6 @@  static int bnxt_try_recover_fw(struct bnxt *bp)
 				break;
 			retry++;
 		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
-		mutex_unlock(&bp->hwrm_cmd_lock);
 
 		if (!BNXT_FW_IS_HEALTHY(sts)) {
 			netdev_err(bp->dev,
@@ -9531,8 +9700,8 @@  static int bnxt_try_recover_fw(struct bnxt *bp)
 
 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 {
-	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_drv_if_change_input req = {0};
+	struct hwrm_func_drv_if_change_output *resp;
+	struct hwrm_func_drv_if_change_input *req;
 	bool fw_reset = !bp->irq_tbl;
 	bool resc_reinit = false;
 	int rc, retry = 0;
@@ -9541,29 +9710,34 @@  static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
+	if (rc)
+		return rc;
+
 	if (up)
-		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
-	mutex_lock(&bp->hwrm_cmd_lock);
+		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
+	resp = hwrm_req_hold(bp, req);
+
+	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
 	while (retry < BNXT_FW_IF_RETRY) {
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
 		if (rc != -EAGAIN)
 			break;
 
 		msleep(50);
 		retry++;
 	}
-	if (!rc)
-		flags = le32_to_cpu(resp->flags);
-	mutex_unlock(&bp->hwrm_cmd_lock);
 
-	if (rc == -EAGAIN)
+	if (rc == -EAGAIN) {
+		hwrm_req_drop(bp, req);
 		return rc;
-	if (rc && up) {
+	} else if (!rc) {
+		flags = le32_to_cpu(resp->flags);
+	} else if (up) {
 		rc = bnxt_try_recover_fw(bp);
 		fw_reset = true;
 	}
+	hwrm_req_drop(bp, req);
 	if (rc)
 		return rc;
 
@@ -9632,8 +9806,8 @@  static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 
 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 {
-	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_port_led_qcaps_input req = {0};
+	struct hwrm_port_led_qcaps_output *resp;
+	struct hwrm_port_led_qcaps_input *req;
 	struct bnxt_pf_info *pf = &bp->pf;
 	int rc;
 
@@ -9641,12 +9815,15 @@  static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
-	req.port_id = cpu_to_le16(pf->port_id);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
+	if (rc)
+		return rc;
+
+	req->port_id = cpu_to_le16(pf->port_id);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc) {
-		mutex_unlock(&bp->hwrm_cmd_lock);
+		hwrm_req_drop(bp, req);
 		return rc;
 	}
 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
@@ -9666,52 +9843,64 @@  static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 			}
 		}
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return 0;
 }
 
 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
 {
-	struct hwrm_wol_filter_alloc_input req = {0};
-	struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_wol_filter_alloc_output *resp;
+	struct hwrm_wol_filter_alloc_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
-	req.port_id = cpu_to_le16(bp->pf.port_id);
-	req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
-	req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
-	memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
+	if (rc)
+		return rc;
+
+	req->port_id = cpu_to_le16(bp->pf.port_id);
+	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		bp->wol_filter_id = resp->wol_filter_id;
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
 {
-	struct hwrm_wol_filter_free_input req = {0};
+	struct hwrm_wol_filter_free_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
-	req.port_id = cpu_to_le16(bp->pf.port_id);
-	req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
-	req.wol_filter_id = bp->wol_filter_id;
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
+	if (rc)
+		return rc;
+
+	req->port_id = cpu_to_le16(bp->pf.port_id);
+	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
+	req->wol_filter_id = bp->wol_filter_id;
+
+	return hwrm_req_send(bp, req);
 }
 
 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
 {
-	struct hwrm_wol_filter_qcfg_input req = {0};
-	struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_wol_filter_qcfg_output *resp;
+	struct hwrm_wol_filter_qcfg_input *req;
 	u16 next_handle = 0;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
-	req.port_id = cpu_to_le16(bp->pf.port_id);
-	req.handle = cpu_to_le16(handle);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
+	if (rc)
+		return rc;
+
+	req->port_id = cpu_to_le16(bp->pf.port_id);
+	req->handle = cpu_to_le16(handle);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		next_handle = le16_to_cpu(resp->next_handle);
 		if (next_handle != 0) {
@@ -9722,7 +9911,7 @@  static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
 			}
 		}
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return next_handle;
 }
 
@@ -9743,19 +9932,20 @@  static void bnxt_get_wol_settings(struct bnxt *bp)
 static ssize_t bnxt_show_temp(struct device *dev,
 			      struct device_attribute *devattr, char *buf)
 {
-	struct hwrm_temp_monitor_query_input req = {0};
 	struct hwrm_temp_monitor_query_output *resp;
+	struct hwrm_temp_monitor_query_input *req;
 	struct bnxt *bp = dev_get_drvdata(dev);
 	u32 len = 0;
 	int rc;
 
-	resp = bp->hwrm_cmd_resp_addr;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+	if (rc)
+		return rc;
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	if (rc)
 		return rc;
 	return len;
@@ -9778,12 +9968,13 @@  static void bnxt_hwmon_close(struct bnxt *bp)
 
 static void bnxt_hwmon_open(struct bnxt *bp)
 {
-	struct hwrm_temp_monitor_query_input req = {0};
+	struct hwrm_temp_monitor_query_input *req;
 	struct pci_dev *pdev = bp->pdev;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
-	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+	if (!rc)
+		rc = hwrm_req_send_silent(bp, req);
 	if (rc == -EACCES || rc == -EOPNOTSUPP) {
 		bnxt_hwmon_close(bp);
 		return;
@@ -10220,53 +10411,60 @@  static int bnxt_close(struct net_device *dev)
 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
 				   u16 *val)
 {
-	struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_port_phy_mdio_read_input req = {0};
+	struct hwrm_port_phy_mdio_read_output *resp;
+	struct hwrm_port_phy_mdio_read_input *req;
 	int rc;
 
 	if (bp->hwrm_spec_code < 0x10a00)
 		return -EOPNOTSUPP;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
-	req.port_id = cpu_to_le16(bp->pf.port_id);
-	req.phy_addr = phy_addr;
-	req.reg_addr = cpu_to_le16(reg & 0x1f);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
+	if (rc)
+		return rc;
+
+	req->port_id = cpu_to_le16(bp->pf.port_id);
+	req->phy_addr = phy_addr;
+	req->reg_addr = cpu_to_le16(reg & 0x1f);
 	if (mdio_phy_id_is_c45(phy_addr)) {
-		req.cl45_mdio = 1;
-		req.phy_addr = mdio_phy_id_prtad(phy_addr);
-		req.dev_addr = mdio_phy_id_devad(phy_addr);
-		req.reg_addr = cpu_to_le16(reg);
+		req->cl45_mdio = 1;
+		req->phy_addr = mdio_phy_id_prtad(phy_addr);
+		req->dev_addr = mdio_phy_id_devad(phy_addr);
+		req->reg_addr = cpu_to_le16(reg);
 	}
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		*val = le16_to_cpu(resp->reg_data);
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
 				    u16 val)
 {
-	struct hwrm_port_phy_mdio_write_input req = {0};
+	struct hwrm_port_phy_mdio_write_input *req;
+	int rc;
 
 	if (bp->hwrm_spec_code < 0x10a00)
 		return -EOPNOTSUPP;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
-	req.port_id = cpu_to_le16(bp->pf.port_id);
-	req.phy_addr = phy_addr;
-	req.reg_addr = cpu_to_le16(reg & 0x1f);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
+	if (rc)
+		return rc;
+
+	req->port_id = cpu_to_le16(bp->pf.port_id);
+	req->phy_addr = phy_addr;
+	req->reg_addr = cpu_to_le16(reg & 0x1f);
 	if (mdio_phy_id_is_c45(phy_addr)) {
-		req.cl45_mdio = 1;
-		req.phy_addr = mdio_phy_id_prtad(phy_addr);
-		req.dev_addr = mdio_phy_id_devad(phy_addr);
-		req.reg_addr = cpu_to_le16(reg);
+		req->cl45_mdio = 1;
+		req->phy_addr = mdio_phy_id_prtad(phy_addr);
+		req->dev_addr = mdio_phy_id_devad(phy_addr);
+		req->reg_addr = cpu_to_le16(reg);
 	}
-	req.reg_data = cpu_to_le16(val);
+	req->reg_data = cpu_to_le16(val);
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 /* rtnl_lock held */
@@ -10508,6 +10706,7 @@  static int bnxt_cfg_rx_mode(struct bnxt *bp)
 {
 	struct net_device *dev = bp->dev;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct hwrm_cfa_l2_filter_free_input *req;
 	struct netdev_hw_addr *ha;
 	int i, off = 0, rc;
 	bool uc_update;
@@ -10519,19 +10718,16 @@  static int bnxt_cfg_rx_mode(struct bnxt *bp)
 	if (!uc_update)
 		goto skip_uc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+	if (rc)
+		return rc;
+	hwrm_req_hold(bp, req);
 	for (i = 1; i < vnic->uc_filter_count; i++) {
-		struct hwrm_cfa_l2_filter_free_input req = {0};
-
-		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
-				       -1);
+		req->l2_filter_id = vnic->fw_l2_filter_id[i];
 
-		req.l2_filter_id = vnic->fw_l2_filter_id[i];
-
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 
 	vnic->uc_filter_count = 1;
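
The bnxt_cfg_rx_mode() loop above shows why hwrm_req_hold() is useful even when the response is never read: holding keeps the request alive across sends, so one request can be initialized outside the loop, rewritten, and re-sent per filter, where the old code built a fresh stack request each iteration. The loop shape as a sketch:

	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
	if (rc)
		return rc;
	hwrm_req_hold(bp, req);		/* held only so it can be re-sent */
	for (i = 1; i < vnic->uc_filter_count; i++) {
		req->l2_filter_id = vnic->fw_l2_filter_id[i];
		rc = hwrm_req_send(bp, req);
	}
	hwrm_req_drop(bp, req);
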
 
@@ -10883,22 +11079,30 @@  static netdev_features_t bnxt_features_check(struct sk_buff *skb,
 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
 			 u32 *reg_buf)
 {
-	struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_dbg_read_direct_input req = {0};
+	struct hwrm_dbg_read_direct_output *resp;
+	struct hwrm_dbg_read_direct_input *req;
 	__le32 *dbg_reg_buf;
 	dma_addr_t mapping;
 	int rc, i;
 
-	dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
-					 &mapping, GFP_KERNEL);
-	if (!dbg_reg_buf)
-		return -ENOMEM;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
-	req.host_dest_addr = cpu_to_le64(mapping);
-	req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
-	req.read_len32 = cpu_to_le32(num_words);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
+	if (rc)
+		return rc;
+
+	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
+					 &mapping);
+	if (!dbg_reg_buf) {
+		rc = -ENOMEM;
+		goto dbg_rd_reg_exit;
+	}
+
+	req->host_dest_addr = cpu_to_le64(mapping);
+
+	resp = hwrm_req_hold(bp, req);
+	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
+	req->read_len32 = cpu_to_le32(num_words);
+
+	rc = hwrm_req_send(bp, req);
 	if (rc || resp->error_code) {
 		rc = -EIO;
 		goto dbg_rd_reg_exit;
@@ -10907,28 +11111,30 @@  int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
 
 dbg_rd_reg_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
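
bnxt_dbg_hwrm_rd_reg() above shows hwrm_req_dma_slice() standing in for dma_alloc_coherent(): the DMA buffer is tied to the request's lifetime, so hwrm_req_drop() releases it and the explicit dma_free_coherent() disappears. A minimal single-word read, sketched under the same assumptions (example_read_fw_word is an illustrative name):

	static int example_read_fw_word(struct bnxt *bp, u32 addr, u32 *val)
	{
		struct hwrm_dbg_read_direct_output *resp;
		struct hwrm_dbg_read_direct_input *req;
		dma_addr_t mapping;
		__le32 *buf;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
		if (rc)
			return rc;

		buf = hwrm_req_dma_slice(bp, req, sizeof(*buf), &mapping);
		if (!buf) {
			hwrm_req_drop(bp, req);	/* un-held drop is valid */
			return -ENOMEM;
		}

		req->host_dest_addr = cpu_to_le64(mapping);
		req->read_addr = cpu_to_le32(addr);
		req->read_len32 = cpu_to_le32(1);

		resp = hwrm_req_hold(bp, req);
		rc = hwrm_req_send(bp, req);
		if (!rc && !resp->error_code)
			*val = le32_to_cpu(buf[0]);
		hwrm_req_drop(bp, req);	/* frees resp and the DMA slice */
		return rc;
	}
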
 
 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
 				       u32 ring_id, u32 *prod, u32 *cons)
 {
-	struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_dbg_ring_info_get_input req = {0};
+	struct hwrm_dbg_ring_info_get_output *resp;
+	struct hwrm_dbg_ring_info_get_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
-	req.ring_type = ring_type;
-	req.fw_ring_id = cpu_to_le32(ring_id);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
+	if (rc)
+		return rc;
+
+	req->ring_type = ring_type;
+	req->fw_ring_id = cpu_to_le32(ring_id);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		*prod = le32_to_cpu(resp->producer_index);
 		*cons = le32_to_cpu(resp->consumer_index);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -10986,18 +11192,22 @@  static void bnxt_dbg_dump_states(struct bnxt *bp)
 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
 {
 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	struct hwrm_ring_reset_input req = {0};
+	struct hwrm_ring_reset_input *req;
 	struct bnxt_napi *bnapi = rxr->bnapi;
 	struct bnxt_cp_ring_info *cpr;
 	u16 cp_ring_id;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
+	if (rc)
+		return rc;
 
 	cpr = &bnapi->cp_ring;
 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
-	req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
-	req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
-	return hwrm_send_message_silent(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+	req->cmpl_ring = cpu_to_le16(cp_ring_id);
+	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
+	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
+	return hwrm_req_send_silent(bp, req);
 }
 
 static void bnxt_reset_task(struct bnxt *bp, bool silent)
@@ -11426,12 +11636,15 @@  static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
 static void bnxt_fw_echo_reply(struct bnxt *bp)
 {
 	struct bnxt_fw_health *fw_health = bp->fw_health;
-	struct hwrm_func_echo_response_input req = {0};
+	struct hwrm_func_echo_response_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
-	req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
-	req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
-	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
+	if (rc)
+		return;
+	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
+	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
+	hwrm_req_send(bp, req);
 }
 
 static void bnxt_sp_task(struct work_struct *work)
@@ -11810,14 +12023,16 @@  static void bnxt_reset_all(struct bnxt *bp)
 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
 			bnxt_fw_reset_writel(bp, i);
 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
-		struct hwrm_fw_reset_input req = {0};
-
-		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1,
-				       HWRM_TARGET_ID_KONG);
-		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
-		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
-		req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
-		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		struct hwrm_fw_reset_input *req;
+
+		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
+		if (!rc) {
+			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
+			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+			rc = hwrm_req_send(bp, req);
+		}
 		if (rc != -ENODEV)
 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
 	}
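
Finally, header fields that bnxt_hwrm_cmd_hdr_init() used to take as positional arguments are now set directly on the request, as the HWRM_RING_RESET (req->cmpl_ring) and HWRM_FW_RESET (req->target_id) conversions above do. As a fragment:

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (!rc) {
		/* formerly the target_id argument of bnxt_hwrm_cmd_hdr_init() */
		req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
		rc = hwrm_req_send(bp, req);
	}
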
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index df898665763a..228a5db7e143 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -39,38 +39,43 @@  static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
 
 static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
 {
-	struct hwrm_queue_pri2cos_cfg_input req = {0};
+	struct hwrm_queue_pri2cos_cfg_input *req;
 	u8 *pri2cos;
-	int i;
+	int rc, i;
+
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
-	req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
-				QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
+	req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
+				 QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
 
-	pri2cos = &req.pri0_cos_queue_id;
+	pri2cos = &req->pri0_cos_queue_id;
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		u8 qidx;
 
-		req.enables |= cpu_to_le32(
+		req->enables |= cpu_to_le32(
 			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
 
 		qidx = bp->tc_to_qidx[ets->prio_tc[i]];
 		pri2cos[i] = bp->q_info[qidx].queue_id;
 	}
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 {
-	struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_queue_pri2cos_qcfg_input req = {0};
-	int rc = 0;
+	struct hwrm_queue_pri2cos_qcfg_output *resp;
+	struct hwrm_queue_pri2cos_qcfg_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
-	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		u8 *pri2cos = &resp->pri0_cos_queue_id;
 		int i;
@@ -84,23 +89,26 @@  static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 				ets->prio_tc[i] = tc;
 		}
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 				      u8 max_tc)
 {
-	struct hwrm_queue_cos2bw_cfg_input req = {0};
+	struct hwrm_queue_cos2bw_cfg_input *req;
 	struct bnxt_cos2bw_cfg cos2bw;
 	void *data;
-	int i;
+	int rc, i;
+
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
 	for (i = 0; i < max_tc; i++) {
 		u8 qidx = bp->tc_to_qidx[i];
 
-		req.enables |= cpu_to_le32(
+		req->enables |= cpu_to_le32(
 			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
 			qidx);
 
@@ -121,30 +129,32 @@  static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 				cpu_to_le32((ets->tc_tx_bw[i] * 100) |
 					    BW_VALUE_UNIT_PERCENT1_100);
 		}
-		data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
+		data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
 		memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
 		if (qidx == 0) {
-			req.queue_id0 = cos2bw.queue_id;
-			req.unused_0 = 0;
+			req->queue_id0 = cos2bw.queue_id;
+			req->unused_0 = 0;
 		}
 	}
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 {
-	struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_queue_cos2bw_qcfg_input req = {0};
+	struct hwrm_queue_cos2bw_qcfg_output *resp;
+	struct hwrm_queue_cos2bw_qcfg_input *req;
 	struct bnxt_cos2bw_cfg cos2bw;
 	void *data;
 	int rc, i;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc) {
-		mutex_unlock(&bp->hwrm_cmd_lock);
+		hwrm_req_drop(bp, req);
 		return rc;
 	}
 
@@ -168,7 +178,7 @@  static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 			ets->tc_tx_bw[tc] = cos2bw.bw_weight;
 		}
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return 0;
 }
 
@@ -230,11 +240,12 @@  static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
 
 static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
 {
-	struct hwrm_queue_pfcenable_cfg_input req = {0};
+	struct hwrm_queue_pfcenable_cfg_input *req;
 	struct ieee_ets *my_ets = bp->ieee_ets;
 	unsigned int tc_mask = 0, pri_mask = 0;
 	u8 i, pri, lltc_count = 0;
 	bool need_q_remap = false;
+	int rc;
 
 	if (!my_ets)
 		return -EINVAL;
@@ -267,38 +278,43 @@  static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
 	if (need_q_remap)
 		bnxt_queue_remap(bp, tc_mask);
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
-	req.flags = cpu_to_le32(pri_mask);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(pri_mask);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
 {
-	struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_queue_pfcenable_qcfg_input req = {0};
+	struct hwrm_queue_pfcenable_qcfg_output *resp;
+	struct hwrm_queue_pfcenable_qcfg_input *req;
 	u8 pri_mask;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (rc) {
-		mutex_unlock(&bp->hwrm_cmd_lock);
+		hwrm_req_drop(bp, req);
 		return rc;
 	}
 
 	pri_mask = le32_to_cpu(resp->flags);
 	pfc->pfc_en = pri_mask;
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return 0;
 }
 
 static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 				  bool add)
 {
-	struct hwrm_fw_set_structured_data_input set = {0};
-	struct hwrm_fw_get_structured_data_input get = {0};
+	struct hwrm_fw_set_structured_data_input *set;
+	struct hwrm_fw_get_structured_data_input *get;
 	struct hwrm_struct_data_dcbx_app *fw_app;
 	struct hwrm_struct_hdr *data;
 	dma_addr_t mapping;
@@ -308,19 +324,26 @@  static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 	if (bp->hwrm_spec_code < 0x10601)
 		return 0;
 
+	rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
+	if (rc)
+		return rc;
+
+	hwrm_req_hold(bp, get);
+	hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);
+
 	n = IEEE_8021QAZ_MAX_TCS;
 	data_len = sizeof(*data) + sizeof(*fw_app) * n;
-	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
-				  GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
+	data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
+	if (!data) {
+		rc = -ENOMEM;
+		goto set_app_exit;
+	}
 
-	bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
-	get.dest_data_addr = cpu_to_le64(mapping);
-	get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
-	get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
-	get.count = 0;
-	rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
+	get->dest_data_addr = cpu_to_le64(mapping);
+	get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+	get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+	get->count = 0;
+	rc = hwrm_req_send(bp, get);
 	if (rc)
 		goto set_app_exit;
 
@@ -366,44 +389,49 @@  static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 	data->len = cpu_to_le16(sizeof(*fw_app) * n);
 	data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
 
-	bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
-	set.src_data_addr = cpu_to_le64(mapping);
-	set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
-	set.hdr_cnt = 1;
-	rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA);
+	if (rc)
+		goto set_app_exit;
+
+	set->src_data_addr = cpu_to_le64(mapping);
+	set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
+	set->hdr_cnt = 1;
+	rc = hwrm_req_send(bp, set);
 
 set_app_exit:
-	dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
+	hwrm_req_drop(bp, get); /* dropping get request and associated slice */
 	return rc;
 }
 
 static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
 {
-	struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_queue_dscp_qcaps_input req = {0};
+	struct hwrm_queue_dscp_qcaps_output *resp;
+	struct hwrm_queue_dscp_qcaps_input *req;
 	int rc;
 
 	bp->max_dscp_value = 0;
 	if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS);
+	if (rc)
+		return rc;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
 	if (!rc) {
 		bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
 		if (bp->max_dscp_value < 0x3f)
 			bp->max_dscp_value = 0;
 	}
-
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
 					bool add)
 {
-	struct hwrm_queue_dscp2pri_cfg_input req = {0};
+	struct hwrm_queue_dscp2pri_cfg_input *req;
 	struct bnxt_dscp2pri_entry *dscp2pri;
 	dma_addr_t mapping;
 	int rc;
@@ -411,23 +439,25 @@  static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
 	if (bp->hwrm_spec_code < 0x10800)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
-	dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
-				      &mapping, GFP_KERNEL);
-	if (!dscp2pri)
+	rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG);
+	if (rc)
+		return rc;
+
+	dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
+	if (!dscp2pri) {
+		hwrm_req_drop(bp, req);
 		return -ENOMEM;
+	}
 
-	req.src_data_addr = cpu_to_le64(mapping);
+	req->src_data_addr = cpu_to_le64(mapping);
 	dscp2pri->dscp = app->protocol;
 	if (add)
 		dscp2pri->mask = 0x3f;
 	else
 		dscp2pri->mask = 0;
 	dscp2pri->pri = app->priority;
-	req.entry_cnt = cpu_to_le16(1);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
-			  mapping);
+	req->entry_cnt = cpu_to_le16(1);
+	rc = hwrm_req_send(bp, req);
 	return rc;
 }
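
The qcfg conversions above show how reading a response changes: hwrm_req_hold() pins the response buffer where the old code took bp->hwrm_cmd_lock, and hwrm_req_drop() releases it where the old code unlocked. A minimal sketch of that shape (bnxt_hwrm_example_qcfg is a hypothetical name, used only for illustration):

static int bnxt_hwrm_example_qcfg(struct bnxt *bp, u8 *pfc_en)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp;
	struct hwrm_queue_pfcenable_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);	/* resp stays valid until drop */
	rc = hwrm_req_send(bp, req);	/* a held request is not consumed */
	if (!rc)
		*pfc_en = le32_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);		/* replaces the mutex unlock */
	return rc;
}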
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 00b284a028c5..1423cc617d93 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -355,28 +355,34 @@  static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
 static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
 				     union devlink_param_value *nvm_cfg_ver)
 {
-	struct hwrm_nvm_get_variable_input req = {0};
+	struct hwrm_nvm_get_variable_input *req;
 	union bnxt_nvm_data *data;
 	dma_addr_t data_dma_addr;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
-	data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
-				  &data_dma_addr, GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
+	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+	if (rc)
+		return rc;
+
+	data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
+	if (!data) {
+		rc = -ENOMEM;
+		goto exit;
+	}
 
-	req.dest_data_addr = cpu_to_le64(data_dma_addr);
-	req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
-	req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+	hwrm_req_hold(bp, req);
+	req->dest_data_addr = cpu_to_le64(data_dma_addr);
+	req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
+	req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
 
-	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_send_silent(bp, req);
 	if (!rc)
 		bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
 					BNXT_NVM_CFG_VER_BITS,
 					BNXT_NVM_CFG_VER_BYTES);
 
-	dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+exit:
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -563,17 +569,20 @@  static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
 }
 
 static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
-			     int msg_len, union devlink_param_value *val)
+			     union devlink_param_value *val)
 {
 	struct hwrm_nvm_get_variable_input *req = msg;
 	struct bnxt_dl_nvm_param nvm_param;
+	struct hwrm_err_output *resp;
 	union bnxt_nvm_data *data;
 	dma_addr_t data_dma_addr;
 	int idx = 0, rc, i;
 
 	/* Get/Set NVM CFG parameter is supported only on PFs */
-	if (BNXT_VF(bp))
+	if (BNXT_VF(bp)) {
+		hwrm_req_drop(bp, req);
 		return -EPERM;
+	}
 
 	for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
 		if (nvm_params[i].id == param_id) {
@@ -582,18 +591,22 @@  static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 		}
 	}
 
-	if (i == ARRAY_SIZE(nvm_params))
+	if (i == ARRAY_SIZE(nvm_params)) {
+		hwrm_req_drop(bp, req);
 		return -EOPNOTSUPP;
+	}
 
 	if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
 		idx = bp->pf.port_id;
 	else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
 		idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
 
-	data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
-				  &data_dma_addr, GFP_KERNEL);
-	if (!data)
+	data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
+
+	if (!data) {
+		hwrm_req_drop(bp, req);
 		return -ENOMEM;
+	}
 
 	req->dest_data_addr = cpu_to_le64(data_dma_addr);
 	req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
@@ -602,26 +615,24 @@  static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 	if (idx)
 		req->dimensions = cpu_to_le16(1);
 
+	resp = hwrm_req_hold(bp, req);
 	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
 		bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
 				      nvm_param.dl_num_bytes);
-		rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, msg);
 	} else {
-		rc = hwrm_send_message_silent(bp, msg, msg_len,
-					      HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send_silent(bp, msg);
 		if (!rc) {
 			bnxt_copy_from_nvm_data(val, data,
 						nvm_param.nvm_num_bits,
 						nvm_param.dl_num_bytes);
 		} else {
-			struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
-
 			if (resp->cmd_err ==
 				NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
 				rc = -EOPNOTSUPP;
 		}
 	}
-	dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+	hwrm_req_drop(bp, req);
 	if (rc == -EACCES)
 		netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
 	return rc;
@@ -630,15 +641,17 @@  static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
 				 struct devlink_param_gset_ctx *ctx)
 {
-	struct hwrm_nvm_get_variable_input req = {0};
 	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+	struct hwrm_nvm_get_variable_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
-	rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
-	if (!rc)
-		if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
-			ctx->val.vbool = !ctx->val.vbool;
+	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+	if (rc)
+		return rc;
+
+	rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
+	if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+		ctx->val.vbool = !ctx->val.vbool;
 
 	return rc;
 }
@@ -646,15 +659,18 @@  static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
 static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
 				 struct devlink_param_gset_ctx *ctx)
 {
-	struct hwrm_nvm_set_variable_input req = {0};
 	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+	struct hwrm_nvm_set_variable_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE);
+	if (rc)
+		return rc;
 
 	if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
 		ctx->val.vbool = !ctx->val.vbool;
 
-	return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+	return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
 }
 
 static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
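
In the devlink conversions above, hwrm_req_dma_slice() replaces the dma_alloc_coherent()/dma_free_coherent() pair: the buffer is carved out of request-owned storage, so its lifetime follows the request and hwrm_req_drop() is the only cleanup needed on every path. The shape, condensed into a hypothetical helper (bnxt_hwrm_example_nvm_read is illustrative only; offset and the other request fields are elided):

static int bnxt_hwrm_example_nvm_read(struct bnxt *bp, u16 index,
				      void *data, u32 length)
{
	struct hwrm_nvm_read_input *req;
	dma_addr_t dma_handle;
	u8 *buf;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);	/* no slice yet, frees the request */
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req);		/* keep the slice mapped across send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);		/* releases the slice as well */
	return rc;
}
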
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b6aaf14bd7fd..b056e3c29bbd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1366,7 +1366,7 @@  static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			  void *_p)
 {
 	struct pcie_ctx_hw_stats *hw_pcie_stats;
-	struct hwrm_pcie_qstats_input req = {0};
+	struct hwrm_pcie_qstats_input *req;
 	struct bnxt *bp = netdev_priv(dev);
 	dma_addr_t hw_pcie_stats_addr;
 	int rc;
@@ -1377,18 +1377,21 @@  static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
 		return;
 
-	hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
-					   sizeof(*hw_pcie_stats),
-					   &hw_pcie_stats_addr, GFP_KERNEL);
-	if (!hw_pcie_stats)
+	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
 		return;
 
+	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
+					   &hw_pcie_stats_addr);
+	if (!hw_pcie_stats) {
+		hwrm_req_drop(bp, req);
+		return;
+	}
+
 	regs->version = 1;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
-	req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
-	req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	hwrm_req_hold(bp, req); /* hold on to slice */
+	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		__le64 *src = (__le64 *)hw_pcie_stats;
 		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
@@ -1397,9 +1400,7 @@  static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
 			dst[i] = le64_to_cpu(src[i]);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
-			  hw_pcie_stats_addr);
+	hwrm_req_drop(bp, req);
 }
 
 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1979,7 +1980,7 @@  static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
 static int bnxt_set_fecparam(struct net_device *dev,
 			     struct ethtool_fecparam *fecparam)
 {
-	struct hwrm_port_phy_cfg_input req = {0};
+	struct hwrm_port_phy_cfg_input *req;
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_link_info *link_info;
 	u32 new_cfg, fec = fecparam->fec;
@@ -2011,9 +2012,11 @@  static int bnxt_set_fecparam(struct net_device *dev,
 	}
 
 apply_fec:
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
-	req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+	if (rc)
+		return rc;
+	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+	rc = hwrm_req_send(bp, req);
 	/* update current settings */
 	if (!rc) {
 		mutex_lock(&bp->link_lock);
@@ -2107,19 +2110,22 @@  static u32 bnxt_get_link(struct net_device *dev)
 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
 			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
 {
-	struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_nvm_get_dev_info_input req = {0};
+	struct hwrm_nvm_get_dev_info_output *resp;
+	struct hwrm_nvm_get_dev_info_input *req;
 	int rc;
 
 	if (BNXT_VF(bp))
 		return -EOPNOTSUPP;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
+	if (rc)
+		return rc;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		memcpy(nvm_dev_info, resp, sizeof(*resp));
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -2132,77 +2138,67 @@  static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 				u16 ext, u16 *index, u32 *item_length,
 				u32 *data_length);
 
-static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
-			      u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
-			      u32 dir_item_len, const u8 *data,
-			      size_t data_len)
+static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+			    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+			    u32 dir_item_len, const u8 *data,
+			    size_t data_len)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_nvm_write_input *req;
 	int rc;
-	struct hwrm_nvm_write_input req = {0};
-	dma_addr_t dma_handle;
-	u8 *kmem = NULL;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
+	if (rc)
+		return rc;
 
-	req.dir_type = cpu_to_le16(dir_type);
-	req.dir_ordinal = cpu_to_le16(dir_ordinal);
-	req.dir_ext = cpu_to_le16(dir_ext);
-	req.dir_attr = cpu_to_le16(dir_attr);
-	req.dir_item_length = cpu_to_le32(dir_item_len);
 	if (data_len && data) {
-		req.dir_data_length = cpu_to_le32(data_len);
+		dma_addr_t dma_handle;
+		u8 *kmem;
 
-		kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
-					  GFP_KERNEL);
-		if (!kmem)
+		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
+		if (!kmem) {
+			hwrm_req_drop(bp, req);
 			return -ENOMEM;
+		}
+
+		req->dir_data_length = cpu_to_le32(data_len);
 
 		memcpy(kmem, data, data_len);
-		req.host_src_addr = cpu_to_le64(dma_handle);
+		req->host_src_addr = cpu_to_le64(dma_handle);
 	}
 
-	rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
-	if (kmem)
-		dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+	hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT);
+	req->dir_type = cpu_to_le16(dir_type);
+	req->dir_ordinal = cpu_to_le16(dir_ordinal);
+	req->dir_ext = cpu_to_le16(dir_ext);
+	req->dir_attr = cpu_to_le16(dir_attr);
+	req->dir_item_length = cpu_to_le32(dir_item_len);
+	rc = hwrm_req_send(bp, req);
 
 	if (rc == -EACCES)
 		bnxt_print_admin_err(bp);
 	return rc;
 }
 
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
-			    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
-			    const u8 *data, size_t data_len)
-{
-	struct bnxt *bp = netdev_priv(dev);
-	int rc;
-
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr,
-				0, data, data_len);
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	return rc;
-}
-
 static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
 				    u8 self_reset, u8 flags)
 {
-	struct hwrm_fw_reset_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_fw_reset_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
+	if (rc)
+		return rc;
 
-	req.embedded_proc_type = proc_type;
-	req.selfrst_status = self_reset;
-	req.flags = flags;
+	req->embedded_proc_type = proc_type;
+	req->selfrst_status = self_reset;
+	req->flags = flags;
 
 	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
-		rc = hwrm_send_message_silent(bp, &req, sizeof(req),
-					      HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send_silent(bp, req);
 	} else {
-		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
 		if (rc == -EACCES)
 			bnxt_print_admin_err(bp);
 	}
@@ -2340,7 +2336,7 @@  static int bnxt_flash_firmware(struct net_device *dev,
 		return -EINVAL;
 	}
 	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
-			      0, 0, fw_data, fw_size);
+			      0, 0, 0, fw_data, fw_size);
 	if (rc == 0)	/* Firmware update successful */
 		rc = bnxt_firmware_reset(dev, dir_type);
 
@@ -2393,7 +2389,7 @@  static int bnxt_flash_microcode(struct net_device *dev,
 		return -EINVAL;
 	}
 	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
-			      0, 0, fw_data, fw_size);
+			      0, 0, 0, fw_data, fw_size);
 
 	return rc;
 }
@@ -2459,7 +2455,7 @@  static int bnxt_flash_firmware_from_file(struct net_device *dev,
 		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
 	else
 		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
-				      0, 0, fw->data, fw->size);
+				      0, 0, 0, fw->data, fw->size);
 	release_firmware(fw);
 	return rc;
 }
@@ -2471,21 +2467,23 @@  static int bnxt_flash_firmware_from_file(struct net_device *dev,
 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
 				   u32 install_type)
 {
-	struct hwrm_nvm_install_update_input install = {0};
-	struct hwrm_nvm_install_update_output resp = {0};
-	struct hwrm_nvm_modify_input modify = {0};
+	struct hwrm_nvm_install_update_input *install;
+	struct hwrm_nvm_install_update_output *resp;
+	struct hwrm_nvm_modify_input *modify;
 	struct bnxt *bp = netdev_priv(dev);
 	bool defrag_attempted = false;
 	dma_addr_t dma_handle;
 	u8 *kmem = NULL;
 	u32 modify_len;
 	u32 item_len;
-	int rc = 0;
 	u16 index;
+	int rc;
 
 	bnxt_hwrm_fw_set_time(bp);
 
-	bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
+	if (rc)
+		return rc;
 
 	/* Try allocating a large DMA buffer first.  Older fw will
 	 * cause excessive NVRAM erases when using small blocks.
@@ -2493,22 +2491,33 @@  int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
 	modify_len = roundup_pow_of_two(fw->size);
 	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
 	while (1) {
-		kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len,
-					  &dma_handle, GFP_KERNEL);
+		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
 		if (!kmem && modify_len > PAGE_SIZE)
 			modify_len /= 2;
 		else
 			break;
 	}
-	if (!kmem)
+	if (!kmem) {
+		hwrm_req_drop(bp, modify);
 		return -ENOMEM;
+	}
 
-	modify.host_src_addr = cpu_to_le64(dma_handle);
+	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
+	if (rc) {
+		hwrm_req_drop(bp, modify);
+		return rc;
+	}
+
+	hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT);
+	hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT);
 
-	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+	hwrm_req_hold(bp, modify);
+	modify->host_src_addr = cpu_to_le64(dma_handle);
+
+	resp = hwrm_req_hold(bp, install);
 	if ((install_type & 0xffff) == 0)
 		install_type >>= 16;
-	install.install_type = cpu_to_le32(install_type);
+	install->install_type = cpu_to_le32(install_type);
 
 	do {
 		u32 copied = 0, len = modify_len;
@@ -2528,76 +2537,69 @@  int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
 			break;
 		}
 
-		modify.dir_idx = cpu_to_le16(index);
+		modify->dir_idx = cpu_to_le16(index);
 
 		if (fw->size > modify_len)
-			modify.flags = BNXT_NVM_MORE_FLAG;
+			modify->flags = BNXT_NVM_MORE_FLAG;
 		while (copied < fw->size) {
 			u32 balance = fw->size - copied;
 
 			if (balance <= modify_len) {
 				len = balance;
 				if (copied)
-					modify.flags |= BNXT_NVM_LAST_FLAG;
+					modify->flags |= BNXT_NVM_LAST_FLAG;
 			}
 			memcpy(kmem, fw->data + copied, len);
-			modify.len = cpu_to_le32(len);
-			modify.offset = cpu_to_le32(copied);
-			rc = hwrm_send_message(bp, &modify, sizeof(modify),
-					       FLASH_PACKAGE_TIMEOUT);
+			modify->len = cpu_to_le32(len);
+			modify->offset = cpu_to_le32(copied);
+			rc = hwrm_req_send(bp, modify);
 			if (rc)
 				goto pkg_abort;
 			copied += len;
 		}
-		mutex_lock(&bp->hwrm_cmd_lock);
-		rc = _hwrm_send_message_silent(bp, &install, sizeof(install),
-					       INSTALL_PACKAGE_TIMEOUT);
-		memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
+
+		rc = hwrm_req_send_silent(bp, install);
 
 		if (defrag_attempted) {
 			/* We have tried to defragment already in the previous
 			 * iteration. Return with the result for INSTALL_UPDATE
 			 */
-			mutex_unlock(&bp->hwrm_cmd_lock);
 			break;
 		}
 
-		if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
+		if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
 		    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
-			install.flags =
+			install->flags =
 				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
 
-			rc = _hwrm_send_message_silent(bp, &install,
-						       sizeof(install),
-						       INSTALL_PACKAGE_TIMEOUT);
-			memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
+			rc = hwrm_req_send_silent(bp, install);
 
-			if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
+			if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
 			    NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
 				/* FW has cleared NVM area, driver will create
 				 * UPDATE directory and try the flash again
 				 */
 				defrag_attempted = true;
-				install.flags = 0;
-				rc = __bnxt_flash_nvram(bp->dev,
-							BNX_DIR_TYPE_UPDATE,
-							BNX_DIR_ORDINAL_FIRST,
-							0, 0, item_len, NULL,
-							0);
+				install->flags = 0;
+				rc = bnxt_flash_nvram(bp->dev,
+						      BNX_DIR_TYPE_UPDATE,
+						      BNX_DIR_ORDINAL_FIRST,
+						      0, 0, item_len, NULL, 0);
 			} else if (rc) {
 				netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
 			}
 		} else if (rc) {
 			netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
 		}
-		mutex_unlock(&bp->hwrm_cmd_lock);
 	} while (defrag_attempted && !rc);
 
 pkg_abort:
-	dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle);
-	if (resp.result) {
+	hwrm_req_drop(bp, modify);
+	hwrm_req_drop(bp, install);
+
+	if (resp->result) {
 		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
-			   (s8)resp.result, (int)resp.problem_item);
+			   (s8)resp->result, (int)resp->problem_item);
 		rc = -ENOPKG;
 	}
 	if (rc == -EACCES)
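
bnxt_flash_package_from_fw_obj() above exercises two further aspects of the API: two requests (modify and install) are held and live at the same time, and hwrm_req_timeout() attaches a non-default timeout to the request itself instead of passing one to every send call. Reduced to a single hypothetical helper (bnxt_hwrm_example_install is an illustrative sketch; the retry and defragment logic is omitted):

static int bnxt_hwrm_example_install(struct bnxt *bp, u32 install_type)
{
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_install_update_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_INSTALL_UPDATE);
	if (rc)
		return rc;

	/* sticks to the request; applies to every subsequent send */
	hwrm_req_timeout(bp, req, INSTALL_PACKAGE_TIMEOUT);
	req->install_type = cpu_to_le32(install_type);

	resp = hwrm_req_hold(bp, req);		/* held so it could be re-sent */
	rc = hwrm_req_send_silent(bp, req);	/* caller does its own logging */
	if (!rc && resp->result)
		rc = -ENOPKG;
	hwrm_req_drop(bp, req);
	return rc;
}
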
@@ -2643,20 +2645,22 @@  static int bnxt_flash_device(struct net_device *dev,
 
 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
 {
+	struct hwrm_nvm_get_dir_info_output *output;
+	struct hwrm_nvm_get_dir_info_input *req;
 	struct bnxt *bp = netdev_priv(dev);
 	int rc;
-	struct hwrm_nvm_get_dir_info_input req = {0};
-	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	output = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		*entries = le32_to_cpu(output->entries);
 		*length = le32_to_cpu(output->entry_length);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -2682,7 +2686,7 @@  static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
 	u8 *buf;
 	size_t buflen;
 	dma_addr_t dma_handle;
-	struct hwrm_nvm_get_dir_entries_input req = {0};
+	struct hwrm_nvm_get_dir_entries_input *req;
 
 	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
 	if (rc != 0)
@@ -2700,20 +2704,23 @@  static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
 	len -= 2;
 	memset(data, 0xff, len);
 
+	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
+	if (rc)
+		return rc;
+
 	buflen = dir_entries * entry_length;
-	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
-				 GFP_KERNEL);
+	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
 	if (!buf) {
-		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
-			   (unsigned)buflen);
+		hwrm_req_drop(bp, req);
 		return -ENOMEM;
 	}
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
-	req.host_dest_addr = cpu_to_le64(dma_handle);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->host_dest_addr = cpu_to_le64(dma_handle);
+
+	hwrm_req_hold(bp, req); /* hold the slice */
+	rc = hwrm_req_send(bp, req);
 	if (rc == 0)
 		memcpy(data, buf, len > buflen ? buflen : len);
-	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -2724,28 +2731,31 @@  static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
 	int rc;
 	u8 *buf;
 	dma_addr_t dma_handle;
-	struct hwrm_nvm_read_input req = {0};
+	struct hwrm_nvm_read_input *req;
 
 	if (!length)
 		return -EINVAL;
 
-	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
-				 GFP_KERNEL);
+	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
+	if (rc)
+		return rc;
+
+	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
 	if (!buf) {
-		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
-			   (unsigned)length);
+		hwrm_req_drop(bp, req);
 		return -ENOMEM;
 	}
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
-	req.host_dest_addr = cpu_to_le64(dma_handle);
-	req.dir_idx = cpu_to_le16(index);
-	req.offset = cpu_to_le32(offset);
-	req.len = cpu_to_le32(length);
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->host_dest_addr = cpu_to_le64(dma_handle);
+	req->dir_idx = cpu_to_le16(index);
+	req->offset = cpu_to_le32(offset);
+	req->len = cpu_to_le32(length);
+
+	hwrm_req_hold(bp, req); /* hold the slice */
+	rc = hwrm_req_send(bp, req);
 	if (rc == 0)
 		memcpy(data, buf, length);
-	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -2753,20 +2763,23 @@  static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 				u16 ext, u16 *index, u32 *item_length,
 				u32 *data_length)
 {
+	struct hwrm_nvm_find_dir_entry_output *output;
+	struct hwrm_nvm_find_dir_entry_input *req;
 	struct bnxt *bp = netdev_priv(dev);
 	int rc;
-	struct hwrm_nvm_find_dir_entry_input req = {0};
-	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
-
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
-	req.enables = 0;
-	req.dir_idx = 0;
-	req.dir_type = cpu_to_le16(type);
-	req.dir_ordinal = cpu_to_le16(ordinal);
-	req.dir_ext = cpu_to_le16(ext);
-	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
+	if (rc)
+		return rc;
+
+	req->enables = 0;
+	req->dir_idx = 0;
+	req->dir_type = cpu_to_le16(type);
+	req->dir_ordinal = cpu_to_le16(ordinal);
+	req->dir_ext = cpu_to_le16(ext);
+	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
+	output = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
 	if (rc == 0) {
 		if (index)
 			*index = le16_to_cpu(output->dir_idx);
@@ -2775,7 +2788,7 @@  static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 		if (data_length)
 			*data_length = le32_to_cpu(output->dir_data_length);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -2870,12 +2883,16 @@  static int bnxt_get_eeprom(struct net_device *dev,
 
 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
 {
+	struct hwrm_nvm_erase_dir_entry_input *req;
 	struct bnxt *bp = netdev_priv(dev);
-	struct hwrm_nvm_erase_dir_entry_input req = {0};
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
-	req.dir_idx = cpu_to_le16(index);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
+	if (rc)
+		return rc;
+
+	req->dir_idx = cpu_to_le16(index);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_set_eeprom(struct net_device *dev,
@@ -2915,7 +2932,7 @@  static int bnxt_set_eeprom(struct net_device *dev,
 	ordinal = eeprom->offset >> 16;
 	attr = eeprom->offset & 0xffff;
 
-	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
 				eeprom->len);
 }
 
@@ -3003,31 +3020,33 @@  static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
 					    u16 page_number, u16 start_addr,
 					    u16 data_length, u8 *buf)
 {
-	struct hwrm_port_phy_i2c_read_input req = {0};
-	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+	struct hwrm_port_phy_i2c_read_output *output;
+	struct hwrm_port_phy_i2c_read_input *req;
 	int rc, byte_offset = 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
-	req.i2c_slave_addr = i2c_addr;
-	req.page_number = cpu_to_le16(page_number);
-	req.port_id = cpu_to_le16(bp->pf.port_id);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
+	if (rc)
+		return rc;
+
+	output = hwrm_req_hold(bp, req);
+	req->i2c_slave_addr = i2c_addr;
+	req->page_number = cpu_to_le16(page_number);
+	req->port_id = cpu_to_le16(bp->pf.port_id);
 	do {
 		u16 xfer_size;
 
 		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
 		data_length -= xfer_size;
-		req.page_offset = cpu_to_le16(start_addr + byte_offset);
-		req.data_length = xfer_size;
-		req.enables = cpu_to_le32(start_addr + byte_offset ?
+		req->page_offset = cpu_to_le16(start_addr + byte_offset);
+		req->data_length = xfer_size;
+		req->enables = cpu_to_le32(start_addr + byte_offset ?
 				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
-		mutex_lock(&bp->hwrm_cmd_lock);
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
 		if (!rc)
 			memcpy(buf + byte_offset, output->data, xfer_size);
-		mutex_unlock(&bp->hwrm_cmd_lock);
 		byte_offset += xfer_size;
 	} while (!rc && data_length > 0);
+	hwrm_req_drop(bp, req);
 
 	return rc;
 }
@@ -3136,13 +3155,13 @@  static int bnxt_nway_reset(struct net_device *dev)
 static int bnxt_set_phys_id(struct net_device *dev,
 			    enum ethtool_phys_id_state state)
 {
-	struct hwrm_port_led_cfg_input req = {0};
+	struct hwrm_port_led_cfg_input *req;
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_pf_info *pf = &bp->pf;
 	struct bnxt_led_cfg *led_cfg;
 	u8 led_state;
 	__le16 duration;
-	int i;
+	int rc, i;
 
 	if (!bp->num_leds || BNXT_VF(bp))
 		return -EOPNOTSUPP;
@@ -3156,27 +3175,35 @@  static int bnxt_set_phys_id(struct net_device *dev,
 	} else {
 		return -EINVAL;
 	}
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
-	req.port_id = cpu_to_le16(pf->port_id);
-	req.num_leds = bp->num_leds;
-	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
+	if (rc)
+		return rc;
+
+	req->port_id = cpu_to_le16(pf->port_id);
+	req->num_leds = bp->num_leds;
+	led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
 	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
-		req.enables |= BNXT_LED_DFLT_ENABLES(i);
+		req->enables |= BNXT_LED_DFLT_ENABLES(i);
 		led_cfg->led_id = bp->leds[i].led_id;
 		led_cfg->led_state = led_state;
 		led_cfg->led_blink_on = duration;
 		led_cfg->led_blink_off = duration;
 		led_cfg->led_group_id = bp->leds[i].led_group_id;
 	}
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
 {
-	struct hwrm_selftest_irq_input req = {0};
+	struct hwrm_selftest_irq_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
+	if (rc)
+		return rc;
+
+	req->cmpl_ring = cpu_to_le16(cmpl_ring);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_test_irq(struct bnxt *bp)
@@ -3196,31 +3223,37 @@  static int bnxt_test_irq(struct bnxt *bp)
 
 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
 {
-	struct hwrm_port_mac_cfg_input req = {0};
+	struct hwrm_port_mac_cfg_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+	if (rc)
+		return rc;
 
-	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
 	if (enable)
-		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
+		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
 	else
-		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
 {
-	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_port_phy_qcaps_input req = {0};
+	struct hwrm_port_phy_qcaps_output *resp;
+	struct hwrm_port_phy_qcaps_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
+	if (rc)
+		return rc;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
 
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -3255,7 +3288,7 @@  static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
 	req->force_link_speed = cpu_to_le16(fw_speed);
 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
 				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
-	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_send(bp, req);
 	req->flags = 0;
 	req->force_link_speed = cpu_to_le16(0);
 	return rc;
@@ -3263,21 +3296,29 @@  static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
 
 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
 {
-	struct hwrm_port_phy_cfg_input req = {0};
+	struct hwrm_port_phy_cfg_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+	/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
+	hwrm_req_hold(bp, req);
 
 	if (enable) {
-		bnxt_disable_an_for_lpbk(bp, &req);
+		bnxt_disable_an_for_lpbk(bp, req);
 		if (ext)
-			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
+			req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
 		else
-			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+			req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
 	} else {
-		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
 	}
-	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+	rc = hwrm_req_send(bp, req);
+	hwrm_req_drop(bp, req);
+	return rc;
 }
 
 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -3395,17 +3436,21 @@  static int bnxt_run_loopback(struct bnxt *bp)
 
 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
 {
-	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_selftest_exec_input req = {0};
+	struct hwrm_selftest_exec_output *resp;
+	struct hwrm_selftest_exec_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	resp->test_success = 0;
-	req.flags = test_mask;
-	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
+	if (rc)
+		return rc;
+
+	hwrm_req_timeout(bp, req, bp->test_info->timeout);
+	req->flags = test_mask;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	*test_results = resp->test_success;
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -3564,32 +3609,34 @@  static int bnxt_reset(struct net_device *dev, u32 *flags)
 	return 0;
 }
 
-static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
 				  struct bnxt_hwrm_dbg_dma_info *info)
 {
-	struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_dbg_cmn_input *cmn_req = msg;
 	__le16 *seq_ptr = msg + info->seq_off;
+	struct hwrm_dbg_cmn_output *cmn_resp;
 	u16 seq = 0, len, segs_off;
-	void *resp = cmn_resp;
 	dma_addr_t dma_handle;
+	void *dma_buf, *resp;
 	int rc, off = 0;
-	void *dma_buf;
 
-	dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
-				     GFP_KERNEL);
-	if (!dma_buf)
+	dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
+	if (!dma_buf) {
+		hwrm_req_drop(bp, msg);
 		return -ENOMEM;
+	}
+
+	hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
+	cmn_resp = hwrm_req_hold(bp, msg);
+	resp = cmn_resp;
 
 	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
 			    total_segments);
 	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
 	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
-	mutex_lock(&bp->hwrm_cmd_lock);
 	while (1) {
 		*seq_ptr = cpu_to_le16(seq);
-		rc = _hwrm_send_message(bp, msg, msg_len,
-					HWRM_COREDUMP_TIMEOUT);
+		rc = hwrm_req_send(bp, msg);
 		if (rc)
 			break;
 
@@ -3633,26 +3680,27 @@  static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
 		seq++;
 		off += len;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
+	hwrm_req_drop(bp, msg);
 	return rc;
 }
 
 static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
 				       struct bnxt_coredump *coredump)
 {
-	struct hwrm_dbg_coredump_list_input req = {0};
 	struct bnxt_hwrm_dbg_dma_info info = {NULL};
+	struct hwrm_dbg_coredump_list_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
+	if (rc)
+		return rc;
 
 	info.dma_len = COREDUMP_LIST_BUF_LEN;
 	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
 	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
 				     data_len);
 
-	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+	rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
 	if (!rc) {
 		coredump->data = info.dest_buf;
 		coredump->data_size = info.dest_buf_size;
@@ -3664,26 +3712,34 @@  static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
 static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
 					   u16 segment_id)
 {
-	struct hwrm_dbg_coredump_initiate_input req = {0};
+	struct hwrm_dbg_coredump_initiate_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
-	req.component_id = cpu_to_le16(component_id);
-	req.segment_id = cpu_to_le16(segment_id);
+	hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
+	req->component_id = cpu_to_le16(component_id);
+	req->segment_id = cpu_to_le16(segment_id);
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
 					   u16 segment_id, u32 *seg_len,
 					   void *buf, u32 buf_len, u32 offset)
 {
-	struct hwrm_dbg_coredump_retrieve_input req = {0};
+	struct hwrm_dbg_coredump_retrieve_input *req;
 	struct bnxt_hwrm_dbg_dma_info info = {NULL};
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
-	req.component_id = cpu_to_le16(component_id);
-	req.segment_id = cpu_to_le16(segment_id);
+	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
+	if (rc)
+		return rc;
+
+	req->component_id = cpu_to_le16(component_id);
+	req->segment_id = cpu_to_le16(segment_id);
 
 	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
 	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
@@ -3696,7 +3752,7 @@  static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
 		info.seg_start = offset;
 	}
 
-	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+	rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
 	if (!rc)
 		*seg_len = info.dest_buf_size;
 
@@ -3975,8 +4031,8 @@  static int bnxt_get_ts_info(struct net_device *dev,
 
 void bnxt_ethtool_init(struct bnxt *bp)
 {
-	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_selftest_qlist_input req = {0};
+	struct hwrm_selftest_qlist_output *resp;
+	struct hwrm_selftest_qlist_input *req;
 	struct bnxt_test_info *test_info;
 	struct net_device *dev = bp->dev;
 	int i, rc;
@@ -3988,19 +4044,22 @@  void bnxt_ethtool_init(struct bnxt *bp)
 	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
 		return;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (rc)
-		goto ethtool_init_exit;
-
 	test_info = bp->test_info;
-	if (!test_info)
+	if (!test_info) {
 		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
-	if (!test_info)
+		if (!test_info)
+			return;
+		bp->test_info = test_info;
+	}
+
+	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
+		return;
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
+	if (rc)
 		goto ethtool_init_exit;
 
-	bp->test_info = test_info;
 	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
 	if (bp->num_tests > BNXT_MAX_TEST)
 		bp->num_tests = BNXT_MAX_TEST;
@@ -4034,7 +4093,7 @@  void bnxt_ethtool_init(struct bnxt *bp)
 	}
 
 ethtool_init_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 }
 
 static void bnxt_get_eth_phy_stats(struct net_device *dev,
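
bnxt_read_sfp_module_eeprom_info() above depends on a held request surviving hwrm_req_send(), so the same request can be updated and re-sent in a loop with a single drop at the end; bnxt_hwrm_phy_loopback() uses the same property to pass a held request into a helper without it being consumed there. A schematic of the re-send loop (bnxt_hwrm_example_i2c_read is hypothetical and omits i2c_slave_addr, port_id and the other request fields):

static int bnxt_hwrm_example_i2c_read(struct bnxt *bp, u16 pages, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_output *output;
	struct hwrm_port_phy_i2c_read_input *req;
	u16 page = 0;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);	/* survives repeated sends */
	do {
		/* update the same request and send it again */
		req->page_number = cpu_to_le16(page);
		rc = hwrm_req_send(bp, req);
		if (!rc)
			memcpy(buf + page * BNXT_MAX_PHY_I2C_RESP_SIZE,
			       output->data, BNXT_MAX_PHY_I2C_RESP_SIZE);
	} while (!rc && ++page < pages);
	hwrm_req_drop(bp, req);			/* one drop after the loop */
	return rc;
}
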
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 4cc2379027cf..f0aa480799ca 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -86,24 +86,28 @@  static void bnxt_ptp_get_current_time(struct bnxt *bp)
 
 static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
 {
-	struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_port_ts_query_input req = {0};
+	struct hwrm_port_ts_query_output *resp;
+	struct hwrm_port_ts_query_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1);
-	req.flags = cpu_to_le32(flags);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(flags);
 	if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
 	    PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
-		req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
-		req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
-		req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
-		req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+		req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
+		req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+		req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
+		req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
 	}
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+
+	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		*ts = le64_to_cpu(resp->ptp_msg_ts);
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -144,14 +148,17 @@  static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
 {
 	struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
 						ptp_info);
-	struct hwrm_port_mac_cfg_input req = {0};
+	struct hwrm_port_mac_cfg_input *req;
 	struct bnxt *bp = ptp->bp;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
-	req.ptp_freq_adj_ppb = cpu_to_le32(ppb);
-	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+	if (rc)
+		return rc;
+
+	req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+	rc = hwrm_req_send(ptp->bp, req);
 	if (rc)
 		netdev_err(ptp->bp->dev,
 			   "ptp adjfreq failed. rc = %d\n", rc);
@@ -187,7 +194,7 @@  void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
 
 static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
 {
-	struct hwrm_func_ptp_pin_cfg_input req = {0};
+	struct hwrm_func_ptp_pin_cfg_input *req;
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 	u8 state = usage != BNXT_PPS_PIN_NONE;
 	u8 *pin_state, *pin_usg;
@@ -199,18 +206,21 @@  static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
 		return -EOPNOTSUPP;
 	}
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_CFG, -1, -1);
+	rc = hwrm_req_init(ptp->bp, req, HWRM_FUNC_PTP_PIN_CFG);
+	if (rc)
+		return rc;
+
 	enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
 		   FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2);
-	req.enables = cpu_to_le32(enables);
+	req->enables = cpu_to_le32(enables);
 
-	pin_state = &req.pin0_state;
-	pin_usg = &req.pin0_usage;
+	pin_state = &req->pin0_state;
+	pin_usg = &req->pin0_usage;
 
 	*(pin_state + (pin * 2)) = state;
 	*(pin_usg + (pin * 2)) = usage;
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_send(ptp->bp, req);
 	if (rc)
 		return rc;
 
@@ -222,12 +232,16 @@  static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
 
 static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
 {
-	struct hwrm_func_ptp_cfg_input req = {0};
+	struct hwrm_func_ptp_cfg_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
-	req.enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
-	req.ptp_pps_event = event;
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
+	req->ptp_pps_event = event;
+	return hwrm_req_send(bp, req);
 }
 
 void bnxt_ptp_reapply_pps(struct bnxt *bp)
@@ -278,7 +292,7 @@  static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
 static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
 			       struct ptp_clock_request *rq)
 {
-	struct hwrm_func_ptp_cfg_input req = {0};
+	struct hwrm_func_ptp_cfg_input *req;
 	struct bnxt *bp = ptp->bp;
 	struct timespec64 ts;
 	u64 target_ns, delta;
@@ -293,20 +307,22 @@  static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
 	if (rc)
 		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+	if (rc)
+		return rc;
 
 	enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD |
 		  FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP |
 		  FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE;
-	req.enables = cpu_to_le16(enables);
-	req.ptp_pps_event = 0;
-	req.ptp_freq_adj_dll_source = 0;
-	req.ptp_freq_adj_dll_phase = 0;
-	req.ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
-	req.ptp_freq_adj_ext_up = 0;
-	req.ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
-
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->enables = cpu_to_le16(enables);
+	req->ptp_pps_event = 0;
+	req->ptp_freq_adj_dll_source = 0;
+	req->ptp_freq_adj_dll_phase = 0;
+	req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
+	req->ptp_freq_adj_ext_up = 0;
+	req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
+
+	return hwrm_req_send(bp, req);
 }
 
 static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
@@ -363,11 +379,15 @@  static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
 
 static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 {
-	struct hwrm_port_mac_cfg_input req = {0};
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	struct hwrm_port_mac_cfg_input *req;
 	u32 flags = 0;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+	if (rc)
+		return rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
 	if (ptp->rx_filter)
 		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
 	else
@@ -376,11 +396,11 @@  static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
 	else
 		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
-	req.flags = cpu_to_le32(flags);
-	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
-	req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+	req->flags = cpu_to_le32(flags);
+	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+	req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -631,11 +651,10 @@  static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin,
 		return -EOPNOTSUPP;
 }
 
-/* bp->hwrm_cmd_lock held by the caller */
 static int bnxt_ptp_pps_init(struct bnxt *bp)
 {
-	struct hwrm_func_ptp_pin_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_ptp_pin_qcfg_input req = {0};
+	struct hwrm_func_ptp_pin_qcfg_output *resp;
+	struct hwrm_func_ptp_pin_qcfg_input *req;
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 	struct ptp_clock_info *ptp_info;
 	struct bnxt_pps *pps_info;
@@ -643,11 +662,16 @@  static int bnxt_ptp_pps_init(struct bnxt *bp)
 	u32 i, rc;
 
 	/* Query current/default PIN CFG */
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_QCFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG);
+	if (rc)
+		return rc;
 
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (rc || !resp->num_pins)
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (rc || !resp->num_pins) {
+		hwrm_req_drop(bp, req);
 		return -EOPNOTSUPP;
+	}
 
 	ptp_info = &ptp->ptp_info;
 	pps_info = &ptp->pps_info;
@@ -656,8 +680,10 @@  static int bnxt_ptp_pps_init(struct bnxt *bp)
 	ptp_info->pin_config = kcalloc(ptp_info->n_pins,
 				       sizeof(*ptp_info->pin_config),
 				       GFP_KERNEL);
-	if (!ptp_info->pin_config)
+	if (!ptp_info->pin_config) {
+		hwrm_req_drop(bp, req);
 		return -ENOMEM;
+	}
 
 	/* Report the TSIO capability to kernel */
 	pin_usg = &resp->pin0_usage;
@@ -675,6 +701,7 @@  static int bnxt_ptp_pps_init(struct bnxt *bp)
 
 		pps_info->pins[i].usage = *pin_usg;
 	}
+	hwrm_req_drop(bp, req);
 
 	/* Only 1 each of ext_ts and per_out pins is available in HW */
 	ptp_info->n_ext_ts = 1;
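
When the response must be read, as in bnxt_ptp_pps_init() above, hwrm_req_hold() returns the response buffer and pins it until hwrm_req_drop(); note that every exit path taken after the hold, including the error paths added here, must drop exactly once. The single-exit shape used by most of the simpler readers in this patch looks like this (illustrative name, assuming the typed response pointer comes back from the hold):

	/* Sketch: hold/drop bracketing a response read, one drop per path. */
	static int example_pin_qcfg(struct bnxt *bp, u8 *num_pins)
	{
		struct hwrm_func_ptp_pin_qcfg_output *resp;
		struct hwrm_func_ptp_pin_qcfg_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG);
		if (rc)
			return rc;

		resp = hwrm_req_hold(bp, req);	/* resp valid until drop */
		rc = hwrm_req_send(bp, req);
		if (!rc)
			*num_pins = resp->num_pins;
		hwrm_req_drop(bp, req);		/* releases req and resp */
		return rc;
	}
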
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 07e8e9f657e4..ff06dd81a50a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -27,21 +27,26 @@ 
 static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
 					  struct bnxt_vf_info *vf, u16 event_id)
 {
-	struct hwrm_fwd_async_event_cmpl_input req = {0};
+	struct hwrm_fwd_async_event_cmpl_input *req;
 	struct hwrm_async_event_cmpl *async_cmpl;
 	int rc = 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
+	if (rc)
+		goto exit;
+
 	if (vf)
-		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
 	else
 		/* broadcast this async event to all VFs */
-		req.encap_async_event_target_id = cpu_to_le16(0xffff);
-	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+		req->encap_async_event_target_id = cpu_to_le16(0xffff);
+	async_cmpl =
+		(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
 	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
 	async_cmpl->event_id = cpu_to_le16(event_id);
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_send(bp, req);
+exit:
 	if (rc)
 		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
 			   rc);
@@ -63,10 +68,10 @@  static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 
 int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
-	struct bnxt_vf_info *vf;
+	struct hwrm_func_cfg_input *req;
 	bool old_setting = false;
+	struct bnxt_vf_info *vf;
 	u32 func_flags;
 	int rc;
 
@@ -90,36 +95,38 @@  int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	/*TODO: if the driver supports VLAN filter on guest VLAN,
 	 * the spoof check should also include vlan anti-spoofing
 	 */
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(func_flags);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
 	if (!rc) {
-		if (setting)
-			vf->flags |= BNXT_VF_SPOOFCHK;
-		else
-			vf->flags &= ~BNXT_VF_SPOOFCHK;
+		req->fid = cpu_to_le16(vf->fw_fid);
+		req->flags = cpu_to_le32(func_flags);
+		rc = hwrm_req_send(bp, req);
+		if (!rc) {
+			if (setting)
+				vf->flags |= BNXT_VF_SPOOFCHK;
+			else
+				vf->flags &= ~BNXT_VF_SPOOFCHK;
+		}
 	}
 	return rc;
 }
 
 static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
 {
-	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp;
+	struct hwrm_func_qcfg_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
-	req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (rc) {
-		mutex_unlock(&bp->hwrm_cmd_lock);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+	if (rc)
 		return rc;
-	}
-	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	return 0;
+
+	req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc)
+		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
+	hwrm_req_drop(bp, req);
+	return rc;
 }
 
 bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -133,18 +140,22 @@  bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 
 static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_input *req;
+	int rc;
 
 	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(vf->fw_fid);
 	if (vf->flags & BNXT_VF_TRUST)
-		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 	else
-		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
+	return hwrm_req_send(bp, req);
 }
 
 int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -204,8 +215,8 @@  int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 
 int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
 	int rc;
 
@@ -221,19 +232,23 @@  int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 	}
 	vf = &bp->pf.vf[vf_id];
 
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
+
 	memcpy(vf->mac_addr, mac, ETH_ALEN);
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
-	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	req->fid = cpu_to_le16(vf->fw_fid);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
+	return hwrm_req_send(bp, req);
 }
 
 int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 		     __be16 vlan_proto)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
 	u16 vlan_tag;
 	int rc;
@@ -259,21 +274,23 @@  int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 	if (vlan_tag == vf->vlan)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.dflt_vlan = cpu_to_le16(vlan_tag);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
-		vf->vlan = vlan_tag;
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (!rc) {
+		req->fid = cpu_to_le16(vf->fw_fid);
+		req->dflt_vlan = cpu_to_le16(vlan_tag);
+		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+		rc = hwrm_req_send(bp, req);
+		if (!rc)
+			vf->vlan = vlan_tag;
+	}
 	return rc;
 }
 
 int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 		   int max_tx_rate)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
 	u32 pf_link_speed;
 	int rc;
@@ -297,16 +314,18 @@  int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 	}
 	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
 		return 0;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
-	req.max_bw = cpu_to_le32(max_tx_rate);
-	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
-	req.min_bw = cpu_to_le32(min_tx_rate);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
 	if (!rc) {
-		vf->min_tx_rate = min_tx_rate;
-		vf->max_tx_rate = max_tx_rate;
+		req->fid = cpu_to_le16(vf->fw_fid);
+		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
+					   FUNC_CFG_REQ_ENABLES_MIN_BW);
+		req->max_bw = cpu_to_le32(max_tx_rate);
+		req->min_bw = cpu_to_le32(min_tx_rate);
+		rc = hwrm_req_send(bp, req);
+		if (!rc) {
+			vf->min_tx_rate = min_tx_rate;
+			vf->max_tx_rate = max_tx_rate;
+		}
 	}
 	return rc;
 }
@@ -359,21 +378,22 @@  static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
 
 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
 {
-	int i, rc = 0;
+	struct hwrm_func_vf_resc_free_input *req;
 	struct bnxt_pf_info *pf = &bp->pf;
-	struct hwrm_func_vf_resc_free_input req = {0};
+	int i, rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	hwrm_req_hold(bp, req);
 	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
-		req.vf_id = cpu_to_le16(i);
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		req->vf_id = cpu_to_le16(i);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
 			break;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
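
A held request can also be re-sent: bnxt_hwrm_func_vf_resource_free() above holds the request purely so it survives repeated hwrm_req_send() calls, updating only vf_id per iteration and dropping once at the end. The loop shape, reduced to a sketch (illustrative name):

	/* Sketch: reuse one held request across a loop of sends. */
	static int example_per_vf(struct bnxt *bp, u16 first_vf, int num_vfs)
	{
		struct hwrm_func_vf_resc_free_input *req;
		int i, rc;

		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
		if (rc)
			return rc;

		hwrm_req_hold(bp, req);	/* keep req alive across sends */
		for (i = first_vf; i < first_vf + num_vfs; i++) {
			req->vf_id = cpu_to_le16(i);
			rc = hwrm_req_send(bp, req);
			if (rc)
				break;
		}
		hwrm_req_drop(bp, req);
		return rc;
	}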
 
@@ -447,51 +467,55 @@  static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
 
 static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 {
-	struct hwrm_func_buf_rgtr_input req = {0};
+	struct hwrm_func_buf_rgtr_input *req;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
+	if (rc)
+		return rc;
 
-	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
-	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
-	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
-	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
-	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
-	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
-	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+	req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+	req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+	req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+	req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+	req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+	req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+	req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
-/* Caller holds bp->hwrm_cmd_lock mutex lock */
-static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
+static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
 
 	vf = &bp->pf.vf[vf_id];
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
+	req->fid = cpu_to_le16(vf->fw_fid);
 
 	if (is_valid_ether_addr(vf->mac_addr)) {
-		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
-		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
+		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+		memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
 	}
 	if (vf->vlan) {
-		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
-		req.dflt_vlan = cpu_to_le16(vf->vlan);
+		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+		req->dflt_vlan = cpu_to_le16(vf->vlan);
 	}
 	if (vf->max_tx_rate) {
-		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
-		req.max_bw = cpu_to_le32(vf->max_tx_rate);
-#ifdef HAVE_IFLA_TX_RATE
-		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
-		req.min_bw = cpu_to_le32(vf->min_tx_rate);
-#endif
+		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
+					    FUNC_CFG_REQ_ENABLES_MIN_BW);
+		req->max_bw = cpu_to_le32(vf->max_tx_rate);
+		req->min_bw = cpu_to_le32(vf->min_tx_rate);
 	}
 	if (vf->flags & BNXT_VF_TRUST)
-		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+		req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 
-	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	return hwrm_req_send(bp, req);
 }
 
 /* Only called by PF to reserve resources for VFs, returns actual number of
@@ -499,7 +523,7 @@  static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
  */
 static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 {
-	struct hwrm_func_vf_resource_cfg_input req = {0};
+	struct hwrm_func_vf_resource_cfg_input *req;
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
 	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
@@ -508,7 +532,9 @@  static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 	u16 vf_msix = 0;
 	u16 vf_rss;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
+	if (rc)
+		return rc;
 
 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
@@ -527,21 +553,21 @@  static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
 
-	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
+	req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
 	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 		min = 0;
-		req.min_rsscos_ctx = cpu_to_le16(min);
+		req->min_rsscos_ctx = cpu_to_le16(min);
 	}
 	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
 	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
-		req.min_cmpl_rings = cpu_to_le16(min);
-		req.min_tx_rings = cpu_to_le16(min);
-		req.min_rx_rings = cpu_to_le16(min);
-		req.min_l2_ctxs = cpu_to_le16(min);
-		req.min_vnics = cpu_to_le16(min);
-		req.min_stat_ctx = cpu_to_le16(min);
+		req->min_cmpl_rings = cpu_to_le16(min);
+		req->min_tx_rings = cpu_to_le16(min);
+		req->min_rx_rings = cpu_to_le16(min);
+		req->min_l2_ctxs = cpu_to_le16(min);
+		req->min_vnics = cpu_to_le16(min);
+		req->min_stat_ctx = cpu_to_le16(min);
 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
-			req.min_hw_ring_grps = cpu_to_le16(min);
+			req->min_hw_ring_grps = cpu_to_le16(min);
 	} else {
 		vf_cp_rings /= num_vfs;
 		vf_tx_rings /= num_vfs;
@@ -551,56 +577,57 @@  static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 		vf_ring_grps /= num_vfs;
 		vf_rss /= num_vfs;
 
-		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
-		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
-		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
-		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
-		req.min_vnics = cpu_to_le16(vf_vnics);
-		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
-		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
-		req.min_rsscos_ctx = cpu_to_le16(vf_rss);
+		req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+		req->min_tx_rings = cpu_to_le16(vf_tx_rings);
+		req->min_rx_rings = cpu_to_le16(vf_rx_rings);
+		req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+		req->min_vnics = cpu_to_le16(vf_vnics);
+		req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+		req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+		req->min_rsscos_ctx = cpu_to_le16(vf_rss);
 	}
-	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
-	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
-	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
-	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
-	req.max_vnics = cpu_to_le16(vf_vnics);
-	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
-	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
-	req.max_rsscos_ctx = cpu_to_le16(vf_rss);
+	req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req->max_tx_rings = cpu_to_le16(vf_tx_rings);
+	req->max_rx_rings = cpu_to_le16(vf_rx_rings);
+	req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+	req->max_vnics = cpu_to_le16(vf_vnics);
+	req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+	req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	req->max_rsscos_ctx = cpu_to_le16(vf_rss);
 	if (bp->flags & BNXT_FLAG_CHIP_P5)
-		req.max_msix = cpu_to_le16(vf_msix / num_vfs);
+		req->max_msix = cpu_to_le16(vf_msix / num_vfs);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	hwrm_req_hold(bp, req);
 	for (i = 0; i < num_vfs; i++) {
 		if (reset)
 			__bnxt_set_vf_params(bp, i);
 
-		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		req->vf_id = cpu_to_le16(pf->first_vf_id + i);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
 			break;
 		pf->active_vfs = i + 1;
 		pf->vf[i].fw_fid = pf->first_vf_id + i;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+
 	if (pf->active_vfs) {
 		u16 n = pf->active_vfs;
 
-		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
-		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
-		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
-					     n;
-		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
-		hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
-		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
-		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
+		hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
+		hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
+		hw_resc->max_hw_ring_grps -=
+			le16_to_cpu(req->min_hw_ring_grps) * n;
+		hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
+		hw_resc->max_rsscos_ctxs -=
+			le16_to_cpu(req->min_rsscos_ctx) * n;
+		hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
+		hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
 		if (bp->flags & BNXT_FLAG_CHIP_P5)
 			hw_resc->max_irqs -= vf_msix * n;
 
 		rc = pf->active_vfs;
 	}
+	hwrm_req_drop(bp, req);
 	return rc;
 }
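
The same hold also keeps the request's own fields readable after the last send: the hw_resc accounting above still reads req->min_* once the loop is done, which would be invalid if hwrm_req_send() had already released the request. Reduced to a sketch (illustrative name and field):

	/* Sketch: a held request stays readable after hwrm_req_send(). */
	static int example_read_back(struct bnxt *bp, u16 *min_tx_rings)
	{
		struct hwrm_func_vf_resource_cfg_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
		if (rc)
			return rc;

		req->min_tx_rings = cpu_to_le16(1);
		hwrm_req_hold(bp, req);
		rc = hwrm_req_send(bp, req);
		/* req is still valid here only because it is held */
		*min_tx_rings = le16_to_cpu(req->min_tx_rings);
		hwrm_req_drop(bp, req);	/* req must not be used after this */
		return rc;
	}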
 
@@ -609,15 +636,18 @@  static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
  */
 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 {
-	u32 rc = 0, mtu, i;
 	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt_pf_info *pf = &bp->pf;
+	struct hwrm_func_cfg_input *req;
 	int total_vf_tx_rings = 0;
 	u16 vf_ring_grps;
+	u32 mtu, i;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	if (rc)
+		return rc;
 
 	/* Remaining rings are distributed equally amongst VFs for now */
 	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
@@ -633,50 +663,49 @@  static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
-				  FUNC_CFG_REQ_ENABLES_MRU |
-				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
-				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
-				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
-				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
-				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
-				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
+				   FUNC_CFG_REQ_ENABLES_MRU |
+				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+				   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+				   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
+				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
 
 	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
-	req.mru = cpu_to_le16(mtu);
-	req.admin_mtu = cpu_to_le16(mtu);
+	req->mru = cpu_to_le16(mtu);
+	req->admin_mtu = cpu_to_le16(mtu);
 
-	req.num_rsscos_ctxs = cpu_to_le16(1);
-	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
-	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
-	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
-	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
-	req.num_l2_ctxs = cpu_to_le16(4);
+	req->num_rsscos_ctxs = cpu_to_le16(1);
+	req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req->num_tx_rings = cpu_to_le16(vf_tx_rings);
+	req->num_rx_rings = cpu_to_le16(vf_rx_rings);
+	req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	req->num_l2_ctxs = cpu_to_le16(4);
 
-	req.num_vnics = cpu_to_le16(vf_vnics);
+	req->num_vnics = cpu_to_le16(vf_vnics);
 	/* FIXME spec currently uses 1 bit for stats ctx */
-	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+	req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	hwrm_req_hold(bp, req);
 	for (i = 0; i < num_vfs; i++) {
 		int vf_tx_rsvd = vf_tx_rings;
 
-		req.fid = cpu_to_le16(pf->first_vf_id + i);
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		req->fid = cpu_to_le16(pf->first_vf_id + i);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
 			break;
 		pf->active_vfs = i + 1;
-		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
+		pf->vf[i].fw_fid = le16_to_cpu(req->fid);
 		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
 					      &vf_tx_rsvd);
 		if (rc)
 			break;
 		total_vf_tx_rings += vf_tx_rsvd;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	if (pf->active_vfs) {
 		hw_resc->max_tx_rings -= total_vf_tx_rings;
 		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@@ -894,23 +923,24 @@  static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 			      void *encap_resp, __le64 encap_resp_addr,
 			      __le16 encap_resp_cpr, u32 msg_size)
 {
-	int rc = 0;
-	struct hwrm_fwd_resp_input req = {0};
+	struct hwrm_fwd_resp_input *req;
+	int rc;
 
 	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
 		return -EINVAL;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
-
-	/* Set the new target id */
-	req.target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_len = cpu_to_le16(msg_size);
-	req.encap_resp_addr = encap_resp_addr;
-	req.encap_resp_cmpl_ring = encap_resp_cpr;
-	memcpy(req.encap_resp, encap_resp, msg_size);
-
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
+	if (!rc) {
+		/* Set the new target id */
+		req->target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_len = cpu_to_le16(msg_size);
+		req->encap_resp_addr = encap_resp_addr;
+		req->encap_resp_cmpl_ring = encap_resp_cpr;
+		memcpy(req->encap_resp, encap_resp, msg_size);
+
+		rc = hwrm_req_send(bp, req);
+	}
 	if (rc)
 		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
 	return rc;
@@ -919,19 +949,21 @@  static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 				  u32 msg_size)
 {
-	int rc = 0;
-	struct hwrm_reject_fwd_resp_input req = {0};
+	struct hwrm_reject_fwd_resp_input *req;
+	int rc;
 
 	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
 		return -EINVAL;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
-	/* Set the new target id */
-	req.target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
-	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+	rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
+	if (!rc) {
+		/* Set the new target id */
+		req->target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
+	}
 	if (rc)
 		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
 	return rc;
@@ -940,19 +972,21 @@  static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 				   u32 msg_size)
 {
-	int rc = 0;
-	struct hwrm_exec_fwd_resp_input req = {0};
+	struct hwrm_exec_fwd_resp_input *req;
+	int rc;
 
 	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
 		return -EINVAL;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
-	/* Set the new target id */
-	req.target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
-	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+	rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
+	if (!rc) {
+		/* Set the new target id */
+		req->target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
+	}
 	if (rc)
 		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
 	return rc;
@@ -1119,8 +1153,8 @@  void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
 
 int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
 {
-	struct hwrm_func_vf_cfg_input req = {0};
-	int rc = 0;
+	struct hwrm_func_vf_cfg_input *req;
+	int rc;
 
 	if (!BNXT_VF(bp))
 		return 0;
@@ -1130,10 +1164,16 @@  int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
 			rc = -EADDRNOTAVAIL;
 		goto mac_done;
 	}
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
-	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
-	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
+	if (rc)
+		goto mac_done;
+
+	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
+	if (!strict)
+		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
+	rc = hwrm_req_send(bp, req);
 mac_done:
 	if (rc && strict) {
 		rc = -EADDRNOTAVAIL;
@@ -1146,15 +1186,17 @@  int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
 
 void bnxt_update_vf_mac(struct bnxt *bp)
 {
-	struct hwrm_func_qcaps_input req = {0};
-	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcaps_output *resp;
+	struct hwrm_func_qcaps_input *req;
 	bool inform_pf = false;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
+	if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
+		return;
+
+	req->fid = cpu_to_le16(0xffff);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+	resp = hwrm_req_hold(bp, req);
+	if (hwrm_req_send(bp, req))
 		goto update_vf_mac_exit;
 
 	/* Store MAC address from the firmware.  There are 2 cases:
@@ -1177,7 +1219,7 @@  void bnxt_update_vf_mac(struct bnxt *bp)
 	if (is_valid_ether_addr(bp->vf.mac_addr))
 		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
 update_vf_mac_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	if (inform_pf)
 		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
 }
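
bnxt_approve_mac() above also introduces the silent request flag: hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT) suppresses the HWRM layer's own error logging for a call that is allowed to fail (the non-strict case). Sketch of that usage (illustrative name):

	/* Sketch: mark a request silent when its failure is tolerated. */
	static int example_try_mac(struct bnxt *bp, const u8 *mac, bool strict)
	{
		struct hwrm_func_vf_cfg_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
		if (rc)
			return rc;

		req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
		if (!strict)	/* don't log firmware rejections we tolerate */
			hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
		return hwrm_req_send(bp, req);
	}
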
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index c0c3cc426f7b..46fae1acbeed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -503,16 +503,18 @@  static int bnxt_tc_parse_flow(struct bnxt *bp,
 static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
 				   struct bnxt_tc_flow_node *flow_node)
 {
-	struct hwrm_cfa_flow_free_input req = { 0 };
+	struct hwrm_cfa_flow_free_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
-	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
-		req.ext_flow_handle = flow_node->ext_flow_handle;
-	else
-		req.flow_handle = flow_node->flow_handle;
+	rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE);
+	if (!rc) {
+		if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+			req->ext_flow_handle = flow_node->ext_flow_handle;
+		else
+			req->flow_handle = flow_node->flow_handle;
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		rc = hwrm_req_send(bp, req);
+	}
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
 
@@ -588,20 +590,22 @@  static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 	struct bnxt_tc_actions *actions = &flow->actions;
 	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
 	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
-	struct hwrm_cfa_flow_alloc_input req = { 0 };
 	struct hwrm_cfa_flow_alloc_output *resp;
+	struct hwrm_cfa_flow_alloc_input *req;
 	u16 flow_flags = 0, action_flags = 0;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC);
+	if (rc)
+		return rc;
 
-	req.src_fid = cpu_to_le16(flow->src_fid);
-	req.ref_flow_handle = ref_flow_handle;
+	req->src_fid = cpu_to_le16(flow->src_fid);
+	req->ref_flow_handle = ref_flow_handle;
 
 	if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
-		memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
+		memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac,
 		       ETH_ALEN);
-		memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
+		memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac,
 		       ETH_ALEN);
 		action_flags |=
 			CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
@@ -616,71 +620,71 @@  static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				action_flags |=
 					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
 				/* L3 source rewrite */
-				req.nat_ip_address[0] =
+				req->nat_ip_address[0] =
 					actions->nat.l3.ipv4.saddr.s_addr;
 				/* L4 source port */
 				if (actions->nat.l4.ports.sport)
-					req.nat_port =
+					req->nat_port =
 						actions->nat.l4.ports.sport;
 			} else {
 				action_flags |=
 					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
 				/* L3 destination rewrite */
-				req.nat_ip_address[0] =
+				req->nat_ip_address[0] =
 					actions->nat.l3.ipv4.daddr.s_addr;
 				/* L4 destination port */
 				if (actions->nat.l4.ports.dport)
-					req.nat_port =
+					req->nat_port =
 						actions->nat.l4.ports.dport;
 			}
 			netdev_dbg(bp->dev,
-				   "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
-				   req.nat_ip_address, actions->nat.src_xlate,
-				   req.nat_port);
+				   "req->nat_ip_address: %pI4 src_xlate: %d req->nat_port: %x\n",
+				   req->nat_ip_address, actions->nat.src_xlate,
+				   req->nat_port);
 		} else {
 			if (actions->nat.src_xlate) {
 				action_flags |=
 					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
 				/* L3 source rewrite */
-				memcpy(req.nat_ip_address,
+				memcpy(req->nat_ip_address,
 				       actions->nat.l3.ipv6.saddr.s6_addr32,
-				       sizeof(req.nat_ip_address));
+				       sizeof(req->nat_ip_address));
 				/* L4 source port */
 				if (actions->nat.l4.ports.sport)
-					req.nat_port =
+					req->nat_port =
 						actions->nat.l4.ports.sport;
 			} else {
 				action_flags |=
 					CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
 				/* L3 destination rewrite */
-				memcpy(req.nat_ip_address,
+				memcpy(req->nat_ip_address,
 				       actions->nat.l3.ipv6.daddr.s6_addr32,
-				       sizeof(req.nat_ip_address));
+				       sizeof(req->nat_ip_address));
 				/* L4 destination port */
 				if (actions->nat.l4.ports.dport)
-					req.nat_port =
+					req->nat_port =
 						actions->nat.l4.ports.dport;
 			}
 			netdev_dbg(bp->dev,
-				   "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
-				   req.nat_ip_address, actions->nat.src_xlate,
-				   req.nat_port);
+				   "req->nat_ip_address: %pI6 src_xlate: %d req->nat_port: %x\n",
+				   req->nat_ip_address, actions->nat.src_xlate,
+				   req->nat_port);
 		}
 	}
 
 	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
 	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
-		req.tunnel_handle = tunnel_handle;
+		req->tunnel_handle = tunnel_handle;
 		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
 		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
 	}
 
-	req.ethertype = flow->l2_key.ether_type;
-	req.ip_proto = flow->l4_key.ip_proto;
+	req->ethertype = flow->l2_key.ether_type;
+	req->ip_proto = flow->l4_key.ip_proto;
 
 	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
-		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
-		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
+		memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN);
+		memcpy(req->smac, flow->l2_key.smac, ETH_ALEN);
 	}
 
 	if (flow->l2_key.num_vlans > 0) {
@@ -689,7 +693,7 @@  static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 		 * in outer_vlan_tci when num_vlans is 1 (which is
 		 * always the case in TC.)
 		 */
-		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
+		req->outer_vlan_tci = flow->l2_key.inner_vlan_tci;
 	}
 
 	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
@@ -702,68 +706,67 @@  static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
 
 		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
-			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
-			req.ip_dst_mask_len =
+			req->ip_dst[0] = l3_key->ipv4.daddr.s_addr;
+			req->ip_dst_mask_len =
 				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
-			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
-			req.ip_src_mask_len =
+			req->ip_src[0] = l3_key->ipv4.saddr.s_addr;
+			req->ip_src_mask_len =
 				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
 		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
-			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
-			       sizeof(req.ip_dst));
-			req.ip_dst_mask_len =
+			memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32,
+			       sizeof(req->ip_dst));
+			req->ip_dst_mask_len =
 					ipv6_mask_len(&l3_mask->ipv6.daddr);
-			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
-			       sizeof(req.ip_src));
-			req.ip_src_mask_len =
+			memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32,
+			       sizeof(req->ip_src));
+			req->ip_src_mask_len =
 					ipv6_mask_len(&l3_mask->ipv6.saddr);
 		}
 	}
 
 	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
-		req.l4_src_port = flow->l4_key.ports.sport;
-		req.l4_src_port_mask = flow->l4_mask.ports.sport;
-		req.l4_dst_port = flow->l4_key.ports.dport;
-		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
+		req->l4_src_port = flow->l4_key.ports.sport;
+		req->l4_src_port_mask = flow->l4_mask.ports.sport;
+		req->l4_dst_port = flow->l4_key.ports.dport;
+		req->l4_dst_port_mask = flow->l4_mask.ports.dport;
 	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
 		/* l4 ports serve as type/code when ip_proto is ICMP */
-		req.l4_src_port = htons(flow->l4_key.icmp.type);
-		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
-		req.l4_dst_port = htons(flow->l4_key.icmp.code);
-		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
+		req->l4_src_port = htons(flow->l4_key.icmp.type);
+		req->l4_src_port_mask = htons(flow->l4_mask.icmp.type);
+		req->l4_dst_port = htons(flow->l4_key.icmp.code);
+		req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
 	}
-	req.flags = cpu_to_le16(flow_flags);
+	req->flags = cpu_to_le16(flow_flags);
 
 	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
 		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
 	} else {
 		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
 			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
-			req.dst_fid = cpu_to_le16(actions->dst_fid);
+			req->dst_fid = cpu_to_le16(actions->dst_fid);
 		}
 		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
 			action_flags |=
 			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
-			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
-			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
-			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
-			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
+			req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
+			req->l2_rewrite_vlan_tci = actions->push_vlan_tci;
+			memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN);
+			memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN);
 		}
 		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
 			action_flags |=
 			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
 			/* Rewrite config with tpid = 0 implies vlan pop */
-			req.l2_rewrite_vlan_tpid = 0;
-			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
-			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
+			req->l2_rewrite_vlan_tpid = 0;
+			memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN);
+			memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN);
 		}
 	}
-	req.action_flags = cpu_to_le16(action_flags);
+	req->action_flags = cpu_to_le16(action_flags);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
 	if (!rc) {
-		resp = bnxt_get_hwrm_resp_addr(bp, &req);
 		/* CFA_FLOW_ALLOC response interpretation:
 		 *		    fw with	     fw with
 		 *		    16-bit	     64-bit
@@ -779,7 +782,7 @@  static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 			flow_node->flow_id = resp->flow_id;
 		}
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
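
Note the switch to hwrm_req_send_silent() here: it appears to be the one-shot form of the silent flag, suited to these CFA paths where the callers already report rc themselves via netdev_info(). Combined with hold/drop, the allocation calls in this file reduce to this sketch (illustrative name; the flow_handle read stands in for whichever response field is needed):

	/* Sketch: silent send plus hold/drop to read an allocation handle. */
	static int example_cfa_alloc(struct bnxt *bp, __le16 *handle)
	{
		struct hwrm_cfa_flow_alloc_output *resp;
		struct hwrm_cfa_flow_alloc_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC);
		if (rc)
			return rc;

		resp = hwrm_req_hold(bp, req);
		rc = hwrm_req_send_silent(bp, req);	/* no HWRM error log */
		if (!rc)
			*handle = resp->flow_handle;
		hwrm_req_drop(bp, req);
		return rc;
	}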
 
@@ -789,67 +792,69 @@  static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 				       __le32 ref_decap_handle,
 				       __le32 *decap_filter_handle)
 {
-	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
 	struct hwrm_cfa_decap_filter_alloc_output *resp;
 	struct ip_tunnel_key *tun_key = &flow->tun_key;
+	struct hwrm_cfa_decap_filter_alloc_input *req;
 	u32 enables = 0;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC);
+	if (rc)
+		goto exit;
 
-	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
+	req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
 	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
 		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
-	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
-	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
+	req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+	req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
 
 	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
 		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
 		/* tunnel_id is wrongly defined in hsi defn. as __le32 */
-		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
+		req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
 	}
 
 	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
 		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
-		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
+		ether_addr_copy(req->dst_macaddr, l2_info->dmac);
 	}
 	if (l2_info->num_vlans) {
 		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
-		req.t_ivlan_vid = l2_info->inner_vlan_tci;
+		req->t_ivlan_vid = l2_info->inner_vlan_tci;
 	}
 
 	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
-	req.ethertype = htons(ETH_P_IP);
+	req->ethertype = htons(ETH_P_IP);
 
 	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
 		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
 			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
 			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
-		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
-		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
-		req.src_ipaddr[0] = tun_key->u.ipv4.src;
+		req->ip_addr_type =
+			CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+		req->dst_ipaddr[0] = tun_key->u.ipv4.dst;
+		req->src_ipaddr[0] = tun_key->u.ipv4.src;
 	}
 
 	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
 		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
-		req.dst_port = tun_key->tp_dst;
+		req->dst_port = tun_key->tp_dst;
 	}
 
 	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
 	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
 	 */
-	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
-	req.enables = cpu_to_le32(enables);
+	req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
+	req->enables = cpu_to_le32(enables);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc) {
-		resp = bnxt_get_hwrm_resp_addr(bp, &req);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
+	if (!rc)
 		*decap_filter_handle = resp->decap_filter_id;
-	} else {
+	hwrm_req_drop(bp, req);
+exit:
+	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
-	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
 
 	return rc;
 }
@@ -857,13 +862,14 @@  static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
 				      __le32 decap_filter_handle)
 {
-	struct hwrm_cfa_decap_filter_free_input req = { 0 };
+	struct hwrm_cfa_decap_filter_free_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
-	req.decap_filter_id = decap_filter_handle;
-
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE);
+	if (!rc) {
+		req->decap_filter_id = decap_filter_handle;
+		rc = hwrm_req_send(bp, req);
+	}
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
 
@@ -875,18 +881,18 @@  static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 				       struct bnxt_tc_l2_key *l2_info,
 				       __le32 *encap_record_handle)
 {
-	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
 	struct hwrm_cfa_encap_record_alloc_output *resp;
-	struct hwrm_cfa_encap_data_vxlan *encap =
-			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
-	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
-				(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
+	struct hwrm_cfa_encap_record_alloc_input *req;
+	struct hwrm_cfa_encap_data_vxlan *encap;
+	struct hwrm_vxlan_ipv4_hdr *encap_ipv4;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
-
-	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
+	rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC);
+	if (rc)
+		goto exit;
 
+	encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data;
+	req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
 	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
 	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
 	if (l2_info->num_vlans) {
@@ -895,6 +901,7 @@  static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
 	}
 
+	encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
 	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
 	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
 	encap_ipv4->ttl = encap_key->ttl;
@@ -906,15 +913,14 @@  static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 	encap->dst_port = encap_key->tp_dst;
 	encap->vni = tunnel_id_to_key32(encap_key->tun_id);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc) {
-		resp = bnxt_get_hwrm_resp_addr(bp, &req);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
+	if (!rc)
 		*encap_record_handle = resp->encap_record_id;
-	} else {
+	hwrm_req_drop(bp, req);
+exit:
+	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
-	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
 
 	return rc;
 }
@@ -922,13 +928,14 @@  static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 static int hwrm_cfa_encap_record_free(struct bnxt *bp,
 				      __le32 encap_record_handle)
 {
-	struct hwrm_cfa_encap_record_free_input req = { 0 };
+	struct hwrm_cfa_encap_record_free_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
-	req.encap_record_id = encap_record_handle;
-
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE);
+	if (!rc) {
+		req->encap_record_id = encap_record_handle;
+		rc = hwrm_req_send(bp, req);
+	}
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
 
@@ -1674,14 +1681,20 @@  static int
 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 			     struct bnxt_tc_stats_batch stats_batch[])
 {
-	struct hwrm_cfa_flow_stats_input req = { 0 };
 	struct hwrm_cfa_flow_stats_output *resp;
-	__le16 *req_flow_handles = &req.flow_handle_0;
-	__le32 *req_flow_ids = &req.flow_id_0;
+	struct hwrm_cfa_flow_stats_input *req;
+	__le16 *req_flow_handles;
+	__le32 *req_flow_ids;
 	int rc, i;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
-	req.num_flows = cpu_to_le16(num_flows);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS);
+	if (rc)
+		goto exit;
+
+	req_flow_handles = &req->flow_handle_0;
+	req_flow_ids = &req->flow_id_0;
+
+	req->num_flows = cpu_to_le16(num_flows);
 	for (i = 0; i < num_flows; i++) {
 		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
 
@@ -1689,13 +1702,12 @@  bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 					&req_flow_handles[i], &req_flow_ids[i]);
 	}
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		__le64 *resp_packets;
 		__le64 *resp_bytes;
 
-		resp = bnxt_get_hwrm_resp_addr(bp, &req);
 		resp_packets = &resp->packet_0;
 		resp_bytes = &resp->byte_0;
 
@@ -1705,10 +1717,11 @@  bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 			stats_batch[i].hw_stats.bytes =
 						le64_to_cpu(resp_bytes[i]);
 		}
-	} else {
-		netdev_info(bp->dev, "error rc=%d\n", rc);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
+exit:
+	if (rc)
+		netdev_info(bp->dev, "error rc=%d\n", rc);
 
 	return rc;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index f621cffccd3a..fde0c3e8ac57 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -238,27 +238,35 @@  static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
 {
 	struct net_device *dev = edev->net;
 	struct bnxt *bp = netdev_priv(dev);
+	struct output *resp;
 	struct input *req;
+	u32 resp_len;
 	int rc;
 
 	if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
 		return -EBUSY;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	req = fw_msg->msg;
-	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
-	rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
-				fw_msg->timeout);
-	if (!rc) {
-		struct output *resp = bp->hwrm_cmd_resp_addr;
-		u32 len = le16_to_cpu(resp->resp_len);
+	rc = hwrm_req_init(bp, req, 0 /* don't care */);
+	if (rc)
+		return rc;
 
-		if (fw_msg->resp_max_len < len)
-			len = fw_msg->resp_max_len;
+	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
+	if (rc) {
+		hwrm_req_drop(bp, req);	/* release the request; replace failed */
+		return rc;
+	}
 
-		memcpy(fw_msg->resp, resp, len);
+	hwrm_req_timeout(bp, req, fw_msg->timeout);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	resp_len = le16_to_cpu(resp->resp_len);
+	if (resp_len) {
+		if (fw_msg->resp_max_len < resp_len)
+			resp_len = fw_msg->resp_max_len;
+
+		memcpy(fw_msg->resp, resp, resp_len);
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
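
bnxt_send_msg() exercises the remaining pieces of the API: hwrm_req_replace() swaps in a fully formed message supplied by the ULP (which is why the request type given to hwrm_req_init() is a don't-care 0), and hwrm_req_timeout() overrides the default command timeout. Condensed sketch (illustrative; it assumes, as the error path above does, that a failed replace does not release the request, hence the explicit drop):

	/* Sketch: forward a caller-built HWRM message with a custom timeout. */
	static int example_fwd_msg(struct bnxt *bp, void *msg, u32 msg_len,
				   unsigned int timeout)
	{
		struct input *req;
		int rc;

		rc = hwrm_req_init(bp, req, 0);	/* type comes from msg */
		if (rc)
			return rc;

		rc = hwrm_req_replace(bp, req, msg, msg_len);
		if (rc) {
			hwrm_req_drop(bp, req);
			return rc;
		}

		hwrm_req_timeout(bp, req, timeout);
		return hwrm_req_send(bp, req);
	}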
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 3ed712a08207..9401936b74fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -28,38 +28,40 @@ 
 static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
 			      u16 *tx_cfa_action, u16 *rx_cfa_code)
 {
-	struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_cfa_vfr_alloc_input req = { 0 };
+	struct hwrm_cfa_vfr_alloc_output *resp;
+	struct hwrm_cfa_vfr_alloc_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
-	req.vf_id = cpu_to_le16(vf_idx);
-	sprintf(req.vfr_name, "vfr%d", vf_idx);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC);
 	if (!rc) {
-		*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
-		*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
-		netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
-			   *tx_cfa_action, *rx_cfa_code);
-	} else {
-		netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
+		req->vf_id = cpu_to_le16(vf_idx);
+		sprintf(req->vfr_name, "vfr%d", vf_idx);
+
+		resp = hwrm_req_hold(bp, req);
+		rc = hwrm_req_send(bp, req);
+		if (!rc) {
+			*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
+			*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
+			netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
+				   *tx_cfa_action, *rx_cfa_code);
+		}
+		hwrm_req_drop(bp, req);
 	}
-
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (rc)
+		netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
 	return rc;
 }
 
 static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
 {
-	struct hwrm_cfa_vfr_free_input req = { 0 };
+	struct hwrm_cfa_vfr_free_input *req;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
-	sprintf(req.vfr_name, "vfr%d", vf_idx);
-
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE);
+	if (!rc) {
+		sprintf(req->vfr_name, "vfr%d", vf_idx);
+		rc = hwrm_req_send(bp, req);
+	}
 	if (rc)
 		netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
 	return rc;
@@ -68,17 +70,18 @@  static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
 static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
 			      u16 *max_mtu)
 {
-	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp;
+	struct hwrm_func_qcfg_input *req;
 	u16 mtu;
 	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
-	req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+	if (rc)
+		return rc;
 
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
 	if (!rc) {
 		mtu = le16_to_cpu(resp->max_mtu_configured);
 		if (!mtu)
@@ -86,7 +89,7 @@  static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
 		else
 			*max_mtu = mtu;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }