@@ -639,7 +639,8 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr);
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
u8 **hdr, unsigned int hdr_room);
@@ -168,6 +168,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
dma_addr_t start_hdr_phys;
u16 length, amsdu_pad;
u8 *start_hdr;
@@ -260,7 +261,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
int ret;
tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
+ tb_len);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
goto out_err;
@@ -272,6 +274,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
data_left -= tb_len;
+ data_offset += tb_len;
tso_build_data(skb, &tso, tb_len);
}
}
@@ -1814,23 +1814,31 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
/**
* iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
* @sgt: scatter gather table
- * @addr: Virtual address
+ * @offset: Offset into the mapped memory (i.e. SKB payload data)
+ * @len: Length of the data area to look up
*
- * Find the entry that includes the address for the given address and return
- * correct physical address for the TB entry.
+ * Find the DMA address that corresponds to the SKB payload data at the
+ * position given by @offset.
*
* Returns: Address for TB entry
*/
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr)
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len)
{
struct scatterlist *sg;
+ unsigned int sg_offset = 0;
int i;
+ /*
+ * Search the mapped DMA areas in the SG list for the area that
+ * contains the data at the given offset and length.
+ */
for_each_sgtable_dma_sg(sgt, sg, i) {
- if (addr >= sg_virt(sg) &&
- (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg))
- return sg_dma_address(sg) +
- ((unsigned long)addr - (unsigned long)sg_virt(sg));
+ if (offset >= sg_offset &&
+ offset + len <= sg_offset + sg_dma_len(sg))
+ return sg_dma_address(sg) + offset - sg_offset;
+
+ sg_offset += sg_dma_len(sg);
}
WARN_ON_ONCE(1);
@@ -1875,7 +1883,9 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+ /* Only map the data, not the header (it is copied to the TSO page) */
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
+ skb->data_len);
if (WARN_ON_ONCE(sgt->orig_nents <= 0))
return NULL;
@@ -1900,6 +1910,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
u16 length, iv_len, amsdu_pad;
dma_addr_t start_hdr_phys;
u8 *start_hdr, *pos_hdr;
@@ -2000,7 +2011,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left);
dma_addr_t tb_phys;
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL;
@@ -2011,6 +2022,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys, size);
data_left -= size;
+ data_offset += size;
tso_build_data(skb, &tso, size);
}
}