@@ -334,8 +334,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
- priv->tx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -344,8 +343,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
< PAGE_SIZE ||
priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
< PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
- priv->rx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -353,8 +351,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
be64_to_cpu(descriptor->max_registered_pages);
mtu = be16_to_cpu(descriptor->mtu);
if (mtu < ETH_MIN_MTU) {
- netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
- mtu);
+ dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -362,12 +359,12 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->num_event_counters = be16_to_cpu(descriptor->counters);
ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
mac = descriptor->mac;
- netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
+ dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
- netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_pages_per_qpl);
+ dev_err(&priv->pdev->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
+ priv->rx_pages_per_qpl);
priv->rx_desc_cnt = priv->rx_pages_per_qpl;
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
@@ -930,7 +930,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->dev->max_mtu = PAGE_SIZE;
err = gve_adminq_set_mtu(priv, priv->dev->mtu);
if (err) {
- netif_err(priv, drv, priv->dev, "Could not set mtu");
+ dev_err(&priv->pdev->dev, "Could not set mtu\n");
goto err;
}
}
@@ -970,10 +970,10 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues);
}
- netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
- priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
- netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
- priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+ dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
+ priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
+ dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
+ priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
setup_device:
err = gve_setup_device_resources(priv);
@@ -1133,7 +1133,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_db_bar;
}
SET_NETDEV_DEV(dev, &pdev->dev);
+
pci_set_drvdata(pdev, dev);
+
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
/* advertise features */
@@ -1160,11 +1162,16 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->state_flags = 0x0;
gve_set_probe_in_progress(priv);
+
+ err = register_netdev(dev);
+ if (err)
+ goto abort_with_netdev;
+
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
if (!priv->gve_wq) {
dev_err(&pdev->dev, "Could not allocate workqueue");
err = -ENOMEM;
- goto abort_with_netdev;
+ goto abort_while_registered;
}
INIT_WORK(&priv->service_task, gve_service_task);
priv->tx_cfg.max_queues = max_tx_queues;
@@ -1174,18 +1181,18 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto abort_with_wq;
- err = register_netdev(dev);
- if (err)
- goto abort_with_wq;
-
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
gve_clear_probe_in_progress(priv);
queue_work(priv->gve_wq, &priv->service_task);
+
return 0;
abort_with_wq:
destroy_workqueue(priv->gve_wq);
+abort_while_registered:
+ unregister_netdev(dev);
+
abort_with_netdev:
free_netdev(dev);