--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -232,6 +232,9 @@ struct gve_priv {
struct work_struct service_task;
unsigned long service_task_flags;
unsigned long state_flags;
+
+ /* The device's DMA address width in bits, read during probe. */
+ u8 dma_mask;
};

enum gve_service_task_flags {
@@ -451,8 +454,7 @@ static inline bool gve_can_recycle_pages(struct net_device *dev)
}

/* buffers */
-int gve_alloc_page(struct gve_priv *priv, struct device *dev,
- struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev, struct page **page, dma_addr_t *dma,
enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -518,7 +518,18 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
enum dma_data_direction dir)
{
- *page = alloc_page(GFP_KERNEL);
+ gfp_t gfp_flags = GFP_KERNEL;
+
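+ /* Steer the allocation toward memory the device can address: GFP_DMA
+ * draws from ZONE_DMA (the low 16MB a 24-bit mask can reach on x86)
+ * and GFP_DMA32 from ZONE_DMA32 (below 4GB).
+ */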
+ if (priv->dma_mask == 24)
+ gfp_flags |= GFP_DMA;
+ else if (priv->dma_mask == 32)
+ gfp_flags |= GFP_DMA32;
+
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -1083,6 +1094,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
__be32 __iomem *db_bar;
struct gve_registers __iomem *reg_bar;
struct gve_priv *priv;
+ u8 dma_mask;
int err;

err = pci_enable_device(pdev);
@@ -1095,19 +1107,6 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);

- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev,
- "Failed to set consistent dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
if (!reg_bar) {
dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1122,10 +1121,33 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_reg_bar;
}

+ dma_mask = readb(&reg_bar->dma_mask);
+ /* Devices that predate this register leave it zeroed (it was
+ * reserved space), so treat that as the historical 64-bit mask.
+ */
+ if (!dma_mask)
+ dma_mask = 64;
gve_write_version(&reg_bar->driver_version);
/* Get max queues to alloc etherdev */
max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
+
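+ /* Size the streaming and coherent DMA masks from the width the
+ * device reported above instead of assuming 64 bits.
+ */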
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
+ goto abort_with_reg_bar;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to set consistent dma mask: err=%d\n", err);
+ goto abort_with_reg_bar;
+ }
+
/* Alloc and setup the netdev and priv */
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
@@ -1160,6 +1182,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->db_bar2 = db_bar;
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
+ priv->dma_mask = dma_mask;

gve_set_probe_in_progress(priv);
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -16,7 +16,9 @@ struct gve_registers {
__be32 adminq_pfn;
__be32 adminq_doorbell;
__be32 adminq_event_counter;
- u8 reserved[3];
+ u8 reserved[2];
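+ /* DMA address width in bits; zero if the device does not set it. */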
+ u8 dma_mask;
u8 driver_version;
};