@@ -2556,7 +2556,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
tpd->iovec[slot].len = skb_headlen(skb);
++slot;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
@@ -177,7 +177,7 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
sg = msg->sgt.sgl;
sg_set_buf(sg, skb->data, skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
sg = sg_next(sg);
BUG_ON(!sg);
frag = &skb_shinfo(skb)->frags[i];
@@ -240,7 +240,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
return ret;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ret = sdma_txadd_page(dd,
@@ -101,7 +101,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
if (unlikely(ret))
goto bail_txadd;
- for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
+ skb_for_each_frag(tx->skb, i) {
skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
/* combine physically continuous fragments later? */
@@ -289,7 +289,7 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
} else
off = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping[i + off] = ib_dma_map_page(ca,
skb_frag_page(frag),
@@ -329,7 +329,7 @@ void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
} else
off = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ib_dma_unmap_page(priv->ca, mapping[i + off],
@@ -2168,7 +2168,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(vp->gendev, frag,
@@ -808,7 +808,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
txd->frag.addrHi = 0;
first_txd->numDesc++;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *frag_addr;
@@ -1368,7 +1368,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
{
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
@@ -106,7 +106,7 @@ static void greth_print_tx_packet(struct sk_buff *skb)
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, length, true);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb_frag_address(&skb_shinfo(skb)->frags[i]),
@@ -2453,7 +2453,7 @@ static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
@@ -600,7 +600,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
netif_dbg(pdata, tx_queued, pdata->netdev,
"mapping frag %u\n", i);
@@ -1806,7 +1806,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
for (len = skb_frag_size(frag); len; ) {
packet->rdesc_count++;
@@ -244,7 +244,7 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
skb_headlen(skb),
DMA_TO_DEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
DMA_TO_DEVICE);
@@ -1465,7 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
tpd->adrl.addr = cpu_to_le64(dma);
tpd->len = cpu_to_le16(maplen);
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
if (++txq->write_idx == txq->count)
@@ -1601,7 +1601,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
u16 fg_size = 0;
u16 proto_hdr_len = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
}
@@ -4071,7 +4071,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
/* Handle fragmented skb */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
@@ -6579,7 +6579,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
sw_idx = NEXT_TX(sw_idx);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
ri = &tnapi->tx_buffers[sw_idx];
if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
tx_bug = 1;
@@ -1587,7 +1587,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
if (!skb_is_nonlinear(skb))
goto doorbell;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1595,7 +1595,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
skb_transport_header(skb), PCI_DMA_TODEVICE);
si = skb_shinfo(skb);
- for (i = 0; i < si->nr_frags; i++)
+ skb_for_each_frag(skb, i)
pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -1012,7 +1012,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
copied += len;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
@@ -2119,7 +2119,7 @@ static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
goto workaround;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* all fragments need to have aligned start addresses */
@@ -1106,8 +1106,7 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
txbdp->lstatus = 0;
- for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
- j++) {
+ skb_for_each_frag(tx_queue->tx_skbuff[i], j) {
txbdp++;
dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
be16_to_cpu(txbdp->length),
@@ -551,7 +551,7 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
1 + payload_nfrags, hlen, addr);
}
- for (i = 0; i < shinfo->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &shinfo->frags[i];
idx = (idx + 1) & tx->mask;
@@ -577,7 +577,7 @@ static void hix5hd2_clean_sg_desc(struct hix5hd2_priv *priv,
len = le32_to_cpu(desc->linear_len);
dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
addr = le32_to_cpu(desc->frags[i].addr);
len = le32_to_cpu(desc->frags[i].size);
dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
@@ -717,7 +717,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
desc->linear_addr = cpu_to_le32(addr);
desc->linear_len = cpu_to_le32(skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
@@ -1257,7 +1257,7 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
return bd_num;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
if (!size)
@@ -1507,7 +1507,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
bd_num += ret;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
@@ -336,7 +336,7 @@ static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
frag_len = (int)skb_headlen(skb);
memcpy(lb_buf + pkt_offset, skb->data, frag_len);
pkt_offset += frag_len;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
memcpy((lb_buf + pkt_offset), frag_data, frag_len);
@@ -149,7 +149,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));
- for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
@@ -189,7 +189,7 @@ static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
struct pci_dev *pdev = hwif->pdev;
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
+ skb_for_each_frag(skb, i)
dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
sges[i + 1].len, DMA_TO_DEVICE);
@@ -1132,7 +1132,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
descs[0].fields.address = dma_addr;
/* Map the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
@@ -1675,7 +1675,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
cur = skb_headlen(skb);
/* Copy the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
memcpy(dst + cur, skb_frag_address(frag),
@@ -1055,7 +1055,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* + 2 desc gap to keep tail from touching head
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
count += TXD_USE_COUNT(skb_frag_size(frag));
@@ -6324,7 +6324,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
@@ -2165,7 +2165,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
const skb_frag_t *frag;
count++;
@@ -1348,7 +1348,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
@@ -8602,7 +8602,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
@@ -4127,7 +4127,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_for_each_frag(skb, f) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
count += TXD_USE_COUNT(skb_frag_size(frag));
@@ -656,7 +656,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
int frag;
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
@@ -4129,7 +4129,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
int i;
dma_addr_t buf_dma_addr;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = skb_frag_address(frag);
@@ -2786,7 +2786,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
struct skge_tx_desc *tf = td;
control |= BMU_STFWD;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
@@ -1199,7 +1199,7 @@ static void sky2_rx_submit(struct sky2_port *sky2,
sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
- for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
+ skb_for_each_frag(re->skb, i)
sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
}
@@ -1217,7 +1217,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
dma_unmap_len_set(re, data_size, size);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
@@ -1254,7 +1254,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
dma_unmap_single(&pdev->dev, re->data_addr,
dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
dma_unmap_page(&pdev->dev, re->frag_addr[i],
skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_FROM_DEVICE);
@@ -1932,7 +1932,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
@@ -1089,7 +1089,7 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
nfrags = 1;
if (skb_is_gso(skb)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
MTK_TX_DMA_BUF_LEN);
@@ -320,7 +320,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg++;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int fsz = skb_frag_size(frag);
@@ -1692,7 +1692,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
if (err)
goto unlock;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
@@ -816,7 +816,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
cp->tx_skb[entry] = skb;
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
dma_addr_t mapping;
@@ -4001,7 +4001,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
struct skb_shared_info *info = skb_shinfo(skb);
unsigned int cur_frag;
- for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
+ skb_for_each_frag(skb, cur_frag) {
const skb_frag_t *frag = info->frags + cur_frag;
void *addr = skb_frag_address(frag);
u32 len = skb_frag_size(frag);
@@ -1910,7 +1910,7 @@ static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
goto unmap_frags;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
@@ -203,7 +203,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
copy_buf);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u8 *vaddr;
@@ -3565,7 +3565,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
len -= MAX_TX_DESC_LEN;
} while (len > 0);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
tb = &rp->tx_buffs[idx];
BUG_ON(tb->skb != NULL);
np->ops->unmap_page(np->device, tb->mapping,
@@ -6688,7 +6688,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len -= this_len;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
@@ -1054,7 +1054,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
first_len, DMA_TO_DEVICE);
entry = NEXT_TX(entry);
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
dma_addr_t mapping;
@@ -2339,7 +2339,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
goto out_dma_error;
entry = NEXT_TX(entry);
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_for_each_frag(skb, frag) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len, mapping, this_txflags;
@@ -1080,7 +1080,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
return err;
nc = err;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u8 *vaddr;
@@ -1121,7 +1121,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
/* make sure we have enough cookies and alignment in every frag */
docopy = skb_shinfo(skb)->nr_frags >= ncookies;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
docopy |= skb_frag_off(f) & 7;
@@ -576,7 +576,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
netif_dbg(pdata, tx_queued, pdata->netdev,
"mapping frag %u\n", i);
@@ -177,7 +177,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
for (len = skb_frag_size(frag); len; ) {
pkt_info->desc_count++;
@@ -1183,7 +1183,7 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
/* Handle the case where skb is fragmented in pages */
cur_desc = first_desc;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 frag_size = skb_frag_size(frag);
@@ -1113,7 +1113,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
pdesc = desc;
/* Handle the case where skb is fragmented in pages */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = skb_frag_page(frag);
u32 page_offset = skb_frag_off(frag);
@@ -2576,7 +2576,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
/* Handle fragments */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
@@ -1321,7 +1321,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
total_len += skb_headlen(skb);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
total_len += skb_frag_size(f);
@@ -747,7 +747,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
buf_offset += buf_size;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 buf_size;
@@ -990,7 +990,7 @@ static int txd_estimate(const struct sk_buff *skb)
int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
@@ -1278,7 +1278,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
/* set up the remaining entries to point to the data */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
int tb_idx;
@@ -544,7 +544,7 @@ static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
{
int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
unsigned int fragsz = skb_frag_size(frag);
@@ -1086,7 +1086,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
}
/* Release all the original (foreign) frags. */
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_for_each_frag(skb, f)
skb_frag_unref(skb, f);
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
@@ -744,7 +744,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
/* Requests for all the frags. */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
skb_frag_off(frag),
@@ -3939,7 +3939,7 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
int cnt, elements = 0;
- for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+ skb_for_each_frag(skb, cnt) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
elements += qeth_get_elements_for_range(
@@ -4152,7 +4152,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
}
/* map page frags into buffer element(s) */
- for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+ skb_for_each_frag(skb, cnt) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
data = skb_frag_address(frag);
@@ -317,7 +317,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
crc = crc32(~0, skb->data, skb_headlen(skb));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
frag = &skb_shinfo(skb)->frags[i];
off = skb_frag_off(frag);
len = skb_frag_size(frag);
@@ -269,7 +269,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb_headlen(skb);
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *fs = skb_shinfo(skb)->frags + i;
hw_buffer.s.addr =
@@ -1384,7 +1384,7 @@ static void cxgbit_lro_skb_dump(struct sk_buff *skb)
"frags %u.\n",
skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
pdu_cb->ddigest, pdu_cb->frags);
- for (i = 0; i < ssi->nr_frags; i++)
+ skb_for_each_frag(skb, i)
pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
skb, i, skb_frag_off(&ssi->frags[i]),
skb_frag_size(&ssi->frags[i]));
@@ -1397,7 +1397,7 @@ static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
u8 i;
memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
- for (i = 0; i < ssi->nr_frags; i++)
+ skb_for_each_frag(skb, i)
put_page(skb_frag_page(&ssi->frags[i]));
ssi->nr_frags = 0;
skb->data_len = 0;
@@ -949,7 +949,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
}
/* checksum stuff in frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
@@ -432,7 +432,7 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -564,7 +564,7 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -662,7 +662,7 @@ static void skb_release_data(struct sk_buff *skb)
skb_zcopy_clear(skb, true);
- for (i = 0; i < shinfo->nr_frags; i++)
+ skb_for_each_frag(skb, i)
__skb_frag_unref(&shinfo->frags[i]);
if (shinfo->frag_list)
@@ -1623,7 +1623,7 @@ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
n = NULL;
goto out;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
skb_frag_ref(skb, i);
}
@@ -1698,7 +1698,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
goto nofrags;
if (skb_zcopy(skb))
refcount_inc(&skb_uarg(skb)->refcnt);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
@@ -2126,7 +2126,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Estimate size of pulled pages. */
eat = delta;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size >= eat)
@@ -2191,7 +2191,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
pull_pages:
eat = delta;
k = 0;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (size <= eat) {
@@ -2259,7 +2259,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
to += copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
@@ -2447,7 +2447,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
/*
* then map the fragments
*/
- for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
+ skb_for_each_frag(skb, seg) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (__splice_segment(skb_frag_page(f),
@@ -2562,7 +2562,7 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
offset -= skb_headlen(skb);
/* Find where we are in frag list */
- for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
+ skb_for_each_frag(skb, fragidx) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
if (offset < skb_frag_size(frag))
@@ -2661,7 +2661,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
from += copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
@@ -2740,7 +2740,7 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
pos = copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2840,7 +2840,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
pos = copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
WARN_ON(start > offset + len);
@@ -3072,7 +3072,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
}
skb_zerocopy_clone(to, from, GFP_ATOMIC);
- for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+ skb_for_each_frag(from, i) {
int size;
if (!len)
@@ -3292,7 +3292,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
pos - len);
/* And move data appendix as is. */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
@@ -4419,7 +4419,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
offset += copy;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
WARN_ON(start > offset + len);
@@ -5329,7 +5329,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
/* if the skb is not cloned this does nothing
* since we set nr_frags to 0.
*/
- for (i = 0; i < from_shinfo->nr_frags; i++)
+ skb_for_each_frag(from, i)
__skb_frag_ref(&from_shinfo->frags[i]);
to->truesize += delta;
@@ -6053,7 +6053,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
kfree(data);
return -ENOMEM;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ skb_for_each_frag(skb, i)
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
@@ -487,7 +487,7 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
return NULL;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ skb_for_each_frag(head, i)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->data_len = head->data_len - plen;
clone->len = clone->data_len;
@@ -4357,7 +4357,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
if (crypto_ahash_update(req))
return 1;
- for (i = 0; i < shi->nr_frags; ++i) {
+ skb_for_each_frag(skb, i) {
const skb_frag_t *f = &shi->frags[i];
unsigned int offset = skb_frag_off(f);
struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
@@ -1644,7 +1644,7 @@ static int __pskb_trim_head(struct sk_buff *skb, int len)
eat = len;
k = 0;
shinfo = skb_shinfo(skb);
- for (i = 0; i < shinfo->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int size = skb_frag_size(&shinfo->frags[i]);
if (size <= eat) {
@@ -1079,7 +1079,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
/* skip iucv_array lying in the headroom */
iba[0].address = (u32)(addr_t)skb->data;
iba[0].length = (u32)skb_headlen(skb);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
iba[i + 1].address =
@@ -1181,7 +1181,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
iba[0].address = (u32)(addr_t)skb->data;
iba[0].length = (u32)skb_headlen(skb);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
iba[i + 1].address =
@@ -630,8 +630,7 @@ static int kcm_write_msgs(struct kcm_sock *kcm)
goto out;
}
- for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
- fragidx++) {
+ skb_for_each_frag(skb, fragidx) {
skb_frag_t *frag;
frag_offset = 0;
@@ -63,7 +63,7 @@ static int __skb_nsg(struct sk_buff *skb, int offset, int len,
offset += chunk;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_for_each_frag(skb, i) {
int end;
WARN_ON(start > offset + len);
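Note: the definition of skb_for_each_frag() itself is not part of this excerpt. Judging from the open-coded loops converted above, the helper presumably reduces to a one-line macro, most likely in include/linux/skbuff.h; the following is a sketch inferred from the replaced loops, not the exact hunk from this series:

#define skb_for_each_frag(skb, i)					\
	for ((i) = 0; (i) < skb_shinfo(skb)->nr_frags; (i)++)

Callers keep indexing skb_shinfo(skb)->frags[i] (or a cached shinfo pointer) inside the loop body, so each conversion above is purely mechanical: same bounds, same iteration order, no behavioural change.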