
[API-NEXT,RFC,26/31] drivers: ixgbe: adding basic driver

Message ID 1452285014-60320-27-git-send-email-christophe.milard@linaro.org
State New

Commit Message

Christophe Milard Jan. 8, 2016, 8:30 p.m. UTC
At this stage, this is a hack intended to prove the viability of the
previously defined interfaces. It is largely inspired by the DPDK
ixgbe driver (hash 9702b2b53f250aa50973e6d86abce45b4a919eda), but heavily
pruned to reduce the interfaces and complexity.
There is also a lot to be improved here, depending on what we want to
achieve (thread safe / process safe / single queue sharing...).
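
For readers skimming the diff, here is a minimal sketch of the registration
contract this driver follows (the types and calls come from the driver
interface defined earlier in the series; the names below are placeholders,
not part of this patch):

  #include <odp_driver.h>

  /* A driver exposes a probe() hook; it returns NULL when the PCI device
   * is not one it supports, or an allocated odp_nic_dev_t otherwise. */
  static odp_nic_dev_t *my_probe(odp_pci_dev_t pci_dev, odp_pktio_t pktio,
                                 odp_nic_sgmt_pool_t rx_sgmt_pool,
                                 odp_nic_sgmt_pool_t tx_sgmt_pool);

  static odp_nic_driver_t my_pmd = {
          .name  = "example",
          .probe = my_probe,
  };

  /* Called once from _odp_driver_init() at startup. */
  void my_driver_init(void)
  {
          odp_nic_driver_register(&my_pmd);
  }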

Signed-off-by: Christophe Milard <christophe.milard@linaro.org>
---
 drivers/driver_init.c               |    2 +
 drivers/ixgbe/Makefile.am           |    2 +-
 drivers/ixgbe/ixgbe_common.h        |   23 +
 drivers/ixgbe/ixgbe_main.c          |  520 ++++++++++++++
 drivers/ixgbe/ixgbe_main.h          |    7 +
 drivers/ixgbe/ixgbe_rxtx.c          | 1279 +++++++++++++++++++++++++++++++++++
 drivers/ixgbe/ixgbe_rxtx.h          |  225 ++++++
 drivers/ixgbe/ixgbe_supported_dev.h |  119 ++++
 8 files changed, 2176 insertions(+), 1 deletion(-)
 create mode 100644 drivers/ixgbe/ixgbe_common.h
 create mode 100644 drivers/ixgbe/ixgbe_main.c
 create mode 100644 drivers/ixgbe/ixgbe_main.h
 create mode 100644 drivers/ixgbe/ixgbe_rxtx.c
 create mode 100644 drivers/ixgbe/ixgbe_rxtx.h
 create mode 100644 drivers/ixgbe/ixgbe_supported_dev.h

Patch

diff --git a/drivers/driver_init.c b/drivers/driver_init.c
index acacffa..6a83c67 100644
--- a/drivers/driver_init.c
+++ b/drivers/driver_init.c
@@ -5,7 +5,9 @@ 
  */
 
 #include "driver_init.h"
+#include "ixgbe/ixgbe_main.h"
 void _odp_driver_init(void)
 {
 	/* call each driver init function here */
+	ixgbe_init();
 }
diff --git a/drivers/ixgbe/Makefile.am b/drivers/ixgbe/Makefile.am
index 2c7f1f7..e8e6774 100644
--- a/drivers/ixgbe/Makefile.am
+++ b/drivers/ixgbe/Makefile.am
@@ -3,5 +3,5 @@  noinst_LTLIBRARIES = libixgbe.la
 
 SUBDIRS = base
 
-libixgbe_la_SOURCES =
+libixgbe_la_SOURCES = ixgbe_main.c ixgbe_rxtx.c
 libixgbe_la_LIBADD = base/libixgbe_base.la
diff --git a/drivers/ixgbe/ixgbe_common.h b/drivers/ixgbe/ixgbe_common.h
new file mode 100644
index 0000000..bf43a19
--- /dev/null
+++ b/drivers/ixgbe/ixgbe_common.h
@@ -0,0 +1,23 @@ 
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "base/ixgbe_type.h"
+#include <odp_driver.h>
+
+#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
+	(&((ixgbe_adapter_t *)adapter)->hw)
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+typedef struct ixgbe_adapter_t {
+	struct ixgbe_hw             hw;
+} ixgbe_adapter_t;
+
+
+#endif /* _IXGBE_COMMON_H_ */
diff --git a/drivers/ixgbe/ixgbe_main.c b/drivers/ixgbe/ixgbe_main.c
new file mode 100644
index 0000000..fba7af0
--- /dev/null
+++ b/drivers/ixgbe/ixgbe_main.c
@@ -0,0 +1,520 @@ 
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <odp_driver.h>
+#include "ixgbe_supported_dev.h"
+#include "ixgbe_common.h"
+#include "ixgbe_rxtx.h"
+#include "base/ixgbe_api.h"
+#include "ixgbe_logs.h"
+#include "ixgbe_main.h"
+
+static void ixgbe_unprobe (odp_pci_dev_t pci_dev, odp_nic_dev_t *nic_dev);
+static int ixgbe_dev_start(odp_nic_dev_t *dev);
+
+
+/*
+ * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
+ */
+static inline int
+ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+	switch (hw->phy.type) {
+	case ixgbe_phy_sfp_avago:
+	case ixgbe_phy_sfp_ftl:
+	case ixgbe_phy_sfp_intel:
+	case ixgbe_phy_sfp_unknown:
+	case ixgbe_phy_sfp_passive_tyco:
+	case ixgbe_phy_sfp_passive_unknown:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static inline int32_t
+ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
+{
+	uint32_t ctrl_ext;
+	int32_t status;
+
+	status = ixgbe_reset_hw(hw);
+
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return status;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int ixgbe_dev_start(odp_nic_dev_t *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int err;
+	bool link_up = 0, negotiate = 0;
+	uint32_t speed = 0;
+	int status;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* stop adapter */
+	hw->adapter_stopped = 0;
+	ixgbe_stop_adapter(hw);
+
+	/* reinitialize adapter
+	 * this calls reset and start */
+	status = ixgbe_pf_reset_hw(hw);
+	if (status != 0)
+		return -1;
+	hw->mac.ops.start_hw(hw);
+	hw->mac.get_link_status = true;
+
+// FIXME: Take care of that for interrupts...
+//	/* check and configure queue intr-vector mapping */
+//	if (dev->data->dev_conf.intr_conf.rxq != 0)
+//		intr_vector = dev->data->nb_rx_queues;
+//
+//	if (rte_intr_efd_enable(intr_handle, intr_vector))
+//		return -1;
+//
+//	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+//		intr_handle->intr_vec =
+//			rte_zmalloc("intr_vec",
+//				    dev->data->nb_rx_queues * sizeof(int),
+//				    0);
+//		if (intr_handle->intr_vec == NULL) {
+//			PMD_INIT_LOG_ERR("Failed to allocate %d rx_queues"
+//				     " intr_vec\n", dev->data->nb_rx_queues);
+//			return -ENOMEM;
+//		}
+//	}
+//
+//	/* configure msix for sleep until rx interrupt */
+//	ixgbe_configure_msix(dev);
+//
+	/* initialize transmission unit */
+	ixgbe_dev_tx_init(dev);
+
+	/* This can fail when allocating segments for descriptor rings */
+	err = ixgbe_dev_rx_init(dev);
+	if (err) {
+		PMD_INIT_LOG_ERR("Unable to initialize RX hardware");
+		goto error;
+	}
+
+	err = ixgbe_dev_rxtx_start(dev);
+	if (err < 0) {
+		PMD_INIT_LOG_ERR("Unable to start rxtx queues");
+		goto error;
+	}
+
+	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
+		err = hw->mac.ops.setup_sfp(hw);
+		if (err)
+			goto error;
+	}
+
+	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		/* Turn on the copper */
+		ixgbe_set_phy_power(hw, true);
+	} else {
+		/* Turn on the laser */
+		ixgbe_enable_tx_laser(hw);
+	}
+
+	err = ixgbe_check_link(hw, &speed, &link_up, 0);
+	if (err)
+		goto error;
+//	dev->data->dev_link.link_status = link_up;
+
+	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
+	if (err)
+		goto error;
+
+	speed = (hw->mac.type != ixgbe_mac_82598EB) ?
+			IXGBE_LINK_SPEED_82599_AUTONEG :
+			IXGBE_LINK_SPEED_82598_AUTONEG;
+
+	err = ixgbe_setup_link(hw, speed, link_up);
+	if (err)
+		goto error;
+
+// FIXME: Take care of that for interrupts...
+//	/* check if lsc interrupt is enabled */
+//	if (dev->data->dev_conf.intr_conf.lsc != 0) {
+//		if (rte_intr_allow_others(intr_handle)) {
+//			rte_intr_callback_register(intr_handle,
+//						   ixgbe_dev_interrupt_handler,
+//						   (void *)dev);
+//			ixgbe_dev_lsc_interrupt_setup(dev);
+//		} else
+//			PMD_INIT_LOG(INFO, "lsc won't enable because of"
+//				     " no intr multiplex\n");
+//	}
+//
+//	/* check if rxq interrupt is enabled */
+//	if (dev->data->dev_conf.intr_conf.rxq != 0)
+//		ixgbe_dev_rxq_interrupt_setup(dev);
+//
+//	/* enable uio/vfio intr/eventfd mapping */
+//	rte_intr_enable(intr_handle);
+//
+//	/* resume enabled intr since hw reset */
+//	ixgbe_enable_intr(dev);
+//
+
+//	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK;
+//	ixgbe_vlan_offload_set(dev, mask);
+
+
+
+	return (0);
+
+error:
+	PMD_INIT_LOG_ERR("failure in ixgbe_dev_start(): %d", err);
+	return -1;
+}
+
+
+/*
+ * Set device link up: enable tx.
+ */
+static int ixgbe_dev_set_link_up(odp_nic_dev_t *nic_dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(nic_dev->data->dev_private);
+
+	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		/* Turn on the copper */
+		ixgbe_set_phy_power(hw, true);
+	} else {
+		/* Turn on the laser */
+		ixgbe_enable_tx_laser(hw);
+	}
+
+	return 0;
+}
+
+/*
+ * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
+ */
+static void
+ixgbe_disable_intr(struct ixgbe_hw *hw)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
+	}
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * Read MAC address
+ */
+static void ixgbe_dev_mac_get(odp_nic_dev_t *nic_dev, void *mac_addr)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(nic_dev->data->dev_private);
+
+	hw->mac.ops.get_mac_addr(hw, mac_addr);
+}
+
+static const odp_nic_dev_ops_t ixgbe_nic_dev_ops = {
+//	.dev_configure        = ixgbe_dev_configure,
+	.dev_start            = ixgbe_dev_start,
+	.dev_set_link_up      = ixgbe_dev_set_link_up,
+	.rx_queue_start	      = ixgbe_dev_rx_queue_start,
+	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
+	.unprobe	      = ixgbe_unprobe,
+	.mac_get	      = ixgbe_dev_mac_get,
+};
+
+
+/*
+ * This function is based on code in ixgbe_attach() in base/ixgbe.c.
+ * It returns 0 on success.
+ */
+static int eth_ixgbe_dev_init(odp_nic_dev_t *nic_dev, odp_pci_dev_t pci_dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(nic_dev->data->dev_private);
+	uint32_t ctrl_ext;
+	int diag;
+
+	PMD_INIT_FUNC_TRACE();
+
+	nic_dev->dev_ops = &ixgbe_nic_dev_ops;
+	nic_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+	nic_dev->tx_pkt_burst = &ixgbe_xmit_pkts_simple;
+
+	/* Vendor and Device ID need to be set before init of shared code */
+	hw->device_id = odp_pci_get_device(pci_dev);
+	hw->vendor_id = odp_pci_get_vendor(pci_dev);
+	hw->hw_addr = odp_pci_get_resource_addr(pci_dev, 0);
+	hw->allow_unsupported_sfp = 1;
+
+	/* Initialize the shared code (base driver) */
+	diag = ixgbe_init_shared_code(hw);
+
+	if (diag != IXGBE_SUCCESS) {
+		PMD_INIT_LOG_ERR("Shared code init failed: %d", diag);
+		return -1;
+	}
+
+	/* pick up the PCI bus settings for reporting later */
+	ixgbe_get_bus_info(hw);
+
+//	/* Unlock any pending hardware semaphore */
+//	ixgbe_swfw_lock_reset(hw);
+//
+//	/* Initialize DCB configuration*/
+//	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
+//	ixgbe_dcb_init(hw,dcb_config);
+//	/* Get Hardware Flow Control setting */
+//	hw->fc.requested_mode = ixgbe_fc_full;
+//	hw->fc.current_mode = ixgbe_fc_full;
+//	hw->fc.pause_time = IXGBE_FC_PAUSE;
+//	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+//		hw->fc.low_water[i] = IXGBE_FC_LO;
+//		hw->fc.high_water[i] = IXGBE_FC_HI;
+//	}
+	hw->fc.send_xon = 0; //1;
+//
+//	/* Make sure we have a good EEPROM before we read from it */
+//	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
+//	if (diag != IXGBE_SUCCESS) {
+//		PMD_INIT_LOG_ERR("The EEPROM checksum is not valid: %d", diag);
+//		return -EIO;
+//	}
+//
+	diag = ixgbe_init_hw(hw);
+//
+//	/*
+//	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
+//	 * is called too soon after the kernel driver unbinding/binding occurs.
+//	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
+//	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
+//	 * also called. See ixgbe_identify_phy_82599(). The reason for the
+//	 * failure is not known, and only occurs when virtualisation features
+//	 * are disabled in the bios. A delay of 100ms was found to be enough by
+//	 * trial-and-error, and is doubled to be safe.
+//	 */
+//	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
+//		rte_delay_ms(200);
+//		diag = ixgbe_init_hw(hw);
+//	}
+//
+//	if (diag == IXGBE_ERR_EEPROM_VERSION) {
+//		PMD_INIT_LOG_ERR("This device is a pre-production adapter/"
+//		    "LOM.  Please be aware there may be issues associated "
+//		    "with your hardware.");
+//		PMD_INIT_LOG_ERR("If you are experiencing problems "
+//		    "please contact your Intel or hardware representative "
+//		    "who provided you with this hardware.");
+//	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
+//		PMD_INIT_LOG_ERR("Unsupported SFP+ Module");
+//	if (diag) {
+//		PMD_INIT_LOG_ERR("Hardware Initialization Failure: %d", diag);
+//		return -EIO;
+//	}
+//
+//	/* Reset the hw statistics */
+//	ixgbe_dev_stats_reset(nic_dev);
+//
+
+	/* disable interrupt */
+	ixgbe_disable_intr(hw);
+
+//
+//	/* reset mappings for queue statistics hw counters*/
+//	ixgbe_reset_qstat_mappings(hw);
+//
+//	/* Allocate memory for storing MAC addresses */
+//	nic_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+//			hw->mac.num_rar_entries, 0);
+//	if (nic_dev->data->mac_addrs == NULL) {
+//		PMD_INIT_LOG_ERR(
+//			"Failed to allocate %u bytes needed to store "
+//			"MAC addresses",
+//			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+//		return -ENOMEM;
+//	}
+//	/* Copy the permanent MAC address */
+//	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+//			&nic_dev->data->mac_addrs[0]);
+//
+//	/* Allocate memory for storing hash filter MAC addresses */
+//	nic_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+//			IXGBE_VMDQ_NUM_UC_MAC, 0);
+//	if (nic_dev->data->hash_mac_addrs == NULL) {
+//		PMD_INIT_LOG_ERR(
+//			"Failed to allocate %d bytes needed to store MAC addresses",
+//			ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+//		return -ENOMEM;
+//	}
+//
+//	/* initialize the vfta */
+//	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+//
+//	/* initialize the hw strip bitmap*/
+//	memset(hwstrip, 0, sizeof(*hwstrip));
+//
+//	/* initialize PF if max_vfs not zero */
+//	ixgbe_pf_host_init(nic_dev);
+//
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	/* let hardware know driver is loaded */
+	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+//	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+//		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
+//			     (int) hw->mac.type, (int) hw->phy.type,
+//			     (int) hw->phy.sfp_type);
+//	else
+//		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
+//			     (int) hw->mac.type, (int) hw->phy.type);
+//
+//	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+//			nic_dev->data->port_id, pci_dev->id.vendor_id,
+//			pci_dev->id.device_id);
+//
+//	/* enable support intr */
+//	ixgbe_enable_intr(nic_dev);
+//
+//	/* initialize 5tuple filter list */
+//	TAILQ_INIT(&filter_info->fivetuple_list);
+//	memset(filter_info->fivetuple_mask, 0,
+//		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+//
+	return 0;
+}
+
+/*
+ * Goes through the list of supported devices and returns 1 if there is
+ * a match.
+ */
+static int is_supported(uint16_t vendor,
+			uint16_t device,
+			uint16_t subsystem_vendor,
+			uint16_t subsystem_device)
+{
+	int i = 0;
+	while ((pci_id_ixgbe_map[i].vendor_id != 0) &&
+	       (pci_id_ixgbe_map[i].device_id != 0) &&
+	       (pci_id_ixgbe_map[i].subsystem_vendor_id != 0) &&
+	       (pci_id_ixgbe_map[i].subsystem_device_id != 0)) {
+
+		if ((pci_id_ixgbe_map[i].vendor_id == vendor) &&
+		    (pci_id_ixgbe_map[i].device_id == device) &&
+		    ((pci_id_ixgbe_map[i].subsystem_vendor_id ==
+					subsystem_vendor) ||
+		     (pci_id_ixgbe_map[i].subsystem_vendor_id ==
+					PCI_ANY_ID)) &&
+		    ((pci_id_ixgbe_map[i].subsystem_device_id ==
+					subsystem_device) ||
+		     (pci_id_ixgbe_map[i].subsystem_device_id ==
+					PCI_ANY_ID)))
+			return 1;
+
+		i++;
+	}
+	return 0;
+}
+
+/*
+ * main probe function
+ */
+static odp_nic_dev_t *probe (odp_pci_dev_t pci_dev, odp_pktio_t pktio,
+		  odp_nic_sgmt_pool_t rx_sgmt_pool,
+		  odp_nic_sgmt_pool_t tx_sgmt_pool)
+{
+	int ret;
+	odp_nic_dev_t *nic_dev;
+
+	/* check if this driver can support the device: */
+	if (!is_supported(odp_pci_get_vendor(pci_dev),
+			  odp_pci_get_device(pci_dev),
+			  odp_pci_get_subsystem_vendor(pci_dev),
+			  odp_pci_get_subsystem_device(pci_dev)))
+		return NULL;
+
+	/* alloc nic_dev with 1 RX and 1 TX queue*/
+	nic_dev = odp_nic_dev_alloc(odp_pci_get_addr_str(pci_dev),
+				    sizeof(ixgbe_adapter_t), 1, 1, pktio);
+	if (!nic_dev)
+		return NULL;
+
+	/* init */
+	eth_ixgbe_dev_init(nic_dev, pci_dev);
+
+	/* init one RX queue for the time being */
+	ret = nic_dev->dev_ops->rx_queue_setup(nic_dev, pci_dev, 0, 32,
+					       rx_sgmt_pool);
+	if (ret < 0) {
+		printf("rx_queue_setup failed!\n");
+		odp_nic_dev_free(nic_dev);
+		return NULL;
+	}
+
+	/* init one TX queue for the time being */
+	ret = nic_dev->dev_ops->tx_queue_setup(nic_dev, pci_dev, 0, 32,
+					       tx_sgmt_pool);
+	if (ret < 0) {
+		printf("tx_queue_setup failed!\n");
+		/* FIXME: release previously allocated RX queue */
+		odp_nic_dev_free(nic_dev);
+		return NULL;
+	}
+
+
+	nic_dev->dev_ops->dev_start(nic_dev);
+	nic_dev->dev_ops->dev_set_link_up(nic_dev);
+
+	return nic_dev;
+}
+
+/*
+ * main unprobe function, undoing a *successful* probe
+ */
+static void ixgbe_unprobe (odp_pci_dev_t pci_dev ODP_UNUSED,
+			   odp_nic_dev_t *nic_dev ODP_UNUSED)
+{
+  /*FIXME*/
+}
+
+
+static odp_nic_driver_t ixgbe_pmd = {
+	.name    = "ixgbeSimple",
+	.probe   = probe,
+};
+
+void ixgbe_init(void)
+{
+	odp_nic_driver_register(&ixgbe_pmd);
+}
+
+
diff --git a/drivers/ixgbe/ixgbe_main.h b/drivers/ixgbe/ixgbe_main.h
new file mode 100644
index 0000000..35148f2
--- /dev/null
+++ b/drivers/ixgbe/ixgbe_main.h
@@ -0,0 +1,7 @@ 
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+void ixgbe_init(void);
diff --git a/drivers/ixgbe/ixgbe_rxtx.c b/drivers/ixgbe/ixgbe_rxtx.c
new file mode 100644
index 0000000..99027a4
--- /dev/null
+++ b/drivers/ixgbe/ixgbe_rxtx.c
@@ -0,0 +1,1279 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define IXGBE_DEBUG_RX 1
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <odp_driver.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_osdep.h"
+#include "ixgbe_common.h"
+#include "ixgbe_rxtx.h"
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define IXGBE_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+/* Default RS bit threshold values */
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH   16
+#endif
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 16
+#endif
+
+
+
+/*********************************************************************
+ *
+ *  TX functions
+ *
+ **********************************************************************/
+
+static void ixgbe_reset_tx_queue(ixgbe_tx_queue_t *txq)
+{
+	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+	ixgbe_tx_entry_t *txe = txq->sw_ring;
+	uint16_t prev, i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i] = zeroed_desc;
+	}
+
+	/* Initialize SW ring entries */
+	prev = (uint16_t) (txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+		txd->wb.status = IXGBE_CPU_TO_LE32(IXGBE_TXD_STAT_DD);
+		txe[i].sgmt = ODP_NIC_SGMT_INVALID;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	txq->tx_tail = 0;
+	txq->nb_tx_used = 0;
+	/*
+	 * Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->ctx_curr = 0;
+	//memset((void*)&txq->ctx_cache, 0,
+	//	IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static const ixgbe_txq_ops_t def_txq_ops = {
+//	.release_mbufs = ixgbe_tx_queue_release_mbufs,
+//	.free_swring = ixgbe_tx_free_swring,
+	.reset = ixgbe_reset_tx_queue,
+};
+
+
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+static int ixgbe_dev_tx_queue_start(odp_nic_dev_t *dev, uint16_t tx_queue_id)
+{
+	struct ixgbe_hw     *hw;
+	ixgbe_tx_queue_t *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (tx_queue_id < dev->data->nb_tx_queues) {
+		txq = dev->data->tx_queues[tx_queue_id];
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+		txdctl |= IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+		/* Wait until TX Enable ready */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = IXGBE_POLL_WAIT_10_MS;
+			do {
+				DELAY(1000); /* 1ms */
+				txdctl = IXGBE_READ_REG(hw,
+					IXGBE_TXDCTL(txq->reg_idx));
+			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+			if (!poll_ms)
+				PMD_INIT_LOG_ERR("Could not enable "
+					     "Tx Queue %d", tx_queue_id);
+		}
+		wmb();
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+	} else
+		return -1;
+
+	return 0;
+}
+
+int ixgbe_dev_tx_queue_setup(odp_nic_dev_t *nic_dev,
+			 odp_pci_dev_t pci_dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 odp_nic_sgmt_pool_t sgmt_pool)
+			 //unsigned int socket_id,
+			 //const struct rte_eth_txconf *tx_conf)
+{
+	odp_dma_map_t sgmts_dma_region;
+	odp_dma_map_t ring_dma_region;
+	ixgbe_tx_queue_t *txq;
+	struct ixgbe_hw     *hw;
+	uint16_t tx_rs_thresh, tx_free_thresh;
+	char descr_name[ODP_SHM_NAME_LEN];
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(nic_dev->data->dev_private);
+
+	/*
+	 * Validate number of transmit descriptors.
+	 * It must not exceed hardware maximum, and must be multiple
+	 * of IXGBE_ALIGN.
+	 */
+	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
+	    (nb_desc > IXGBE_MAX_RING_DESC) ||
+	    (nb_desc < IXGBE_MIN_RING_DESC)) {
+		return -EINVAL;
+	}
+
+	/*
+	 * The following two parameters control the setting of the RS bit on
+	 * transmit descriptors.
+	 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
+	 * descriptors have been used.
+	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+	 * descriptors are used or if the number of descriptors required
+	 * to transmit a packet is greater than the number of free TX
+	 * descriptors.
+	 * The following constraints must be satisfied:
+	 *  tx_rs_thresh must be greater than 0.
+	 *  tx_rs_thresh must be less than the size of the ring minus 2.
+	 *  tx_rs_thresh must be less than or equal to tx_free_thresh.
+	 *  tx_rs_thresh must be a divisor of the ring size.
+	 *  tx_free_thresh must be greater than 0.
+	 *  tx_free_thresh must be less than the size of the ring minus 3.
+	 * One descriptor in the TX ring is used as a sentinel to avoid a
+	 * H/W race condition, hence the maximum threshold constraints.
+	 * When set to zero use default values.
+	 */
+	tx_rs_thresh = (uint16_t) DEFAULT_TX_RS_THRESH;
+	tx_free_thresh = (uint16_t) DEFAULT_TX_FREE_THRESH;
+	if (tx_rs_thresh >= (nb_desc - 2)) {
+		PMD_INIT_LOG_ERR("tx_rs_thresh must be less than the number "
+			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)nic_dev->data->port_id, (int)queue_idx);
+		return -1;
+	}
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_INIT_LOG_ERR("tx_free_thresh must be less than the "
+			     "number of TX descriptors minus 3. "
+			     "(tx_free_thresh=%u "
+			     "port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)nic_dev->data->port_id, (int)queue_idx);
+		return -1;
+	}
+	if (tx_rs_thresh > tx_free_thresh) {
+		PMD_INIT_LOG_ERR("tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)nic_dev->data->port_id,
+			     (int)queue_idx);
+		return -1;
+	}
+	if ((nb_desc % tx_rs_thresh) != 0) {
+		PMD_INIT_LOG_ERR("tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)nic_dev->data->port_id, (int)queue_idx);
+		return -1;
+	}
+
+//	/* Free memory prior to re-allocation if needed... */
+//	if (dev->data->tx_queues[queue_idx] != NULL) {
+//		ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+//		dev->data->tx_queues[queue_idx] = NULL;
+//	}
+
+	/* First allocate the tx queue data structure */
+	txq = malloc(sizeof(ixgbe_tx_queue_t));
+	if (txq == NULL)
+		return (-1);
+	memset(txq, 0, sizeof(ixgbe_tx_queue_t));
+
+	/* DMA map the memory pool from which segments to be transmitted are taken: */
+	sgmts_dma_region = odp_nic_sgmt_pool_dma_map(sgmt_pool);
+	if (odp_pci_map_dma_region(pci_dev, sgmts_dma_region) <0) {
+		//FIXME: ixgbe_tx_queue_release(rxq);
+		return (-1);
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->tx_rs_thresh = tx_rs_thresh;
+	txq->tx_free_thresh = tx_free_thresh;
+//	txq->pthresh = tx_conf->tx_thresh.pthresh;
+//	txq->hthresh = tx_conf->tx_thresh.hthresh;
+//	txq->wthresh = tx_conf->tx_thresh.wthresh;
+	txq->queue_id = queue_idx;
+	txq->reg_idx = (uint16_t)queue_idx;
+	txq->port_id = nic_dev->data->port_id;
+//	txq->txq_flags = tx_conf->txq_flags;
+	txq->ops = &def_txq_ops;
+//	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
+
+
+	/*
+	 * Allocate TX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	snprintf(descr_name, sizeof(descr_name),
+		 "TX ring descr: %s:%d:%d",
+		 odp_pci_get_addr_str(pci_dev),
+		 txq->port_id, queue_idx);
+	odp_shm_t shm; //FIXME: save and release
+	shm = odp_shm_reserve(descr_name,
+			      TX_RING_SZ,
+			      ODP_CACHE_LINE_SIZE, ODP_SHM_DMA);
+	ring_dma_region = odp_shm_get_dma_map(shm);
+	if (ring_dma_region == ODP_DMA_REGION_INVALID) {
+		//FIXME: ixgbe_tx_queue_release(rxq);
+		return (-1);
+	}
+	if (odp_pci_map_dma_region(pci_dev, ring_dma_region) <0) {
+		//FIXME: ixgbe_tx_queue_release(rxq);
+		return (-1);
+	}
+
+	txq->tx_ring_dma_addr = odp_dma_map_get_dma_addr(ring_dma_region);
+	txq->tx_ring = (union ixgbe_adv_tx_desc *)
+			odp_dma_map_get_addr(ring_dma_region);
+
+	/* Allocate software ring */
+	txq->sw_ring = malloc(sizeof(ixgbe_tx_entry_t) * nb_desc);
+	if (txq->sw_ring == NULL) {
+		//ixgbe_tx_queue_release(txq);
+		return (-1);
+	}
+	PMD_INIT_LOG_DBG("sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_dma_addr);
+
+	txq->ops->reset(txq);
+
+	nic_dev->data->tx_queues[queue_idx] = txq;
+
+
+	return (0);
+}
+
+/*
+ * Initializes Transmit Unit.
+ */
+void
+ixgbe_dev_tx_init(odp_nic_dev_t *dev)
+{
+	struct ixgbe_hw     *hw;
+	ixgbe_tx_queue_t *txq;
+	odp_dma_addr_t bus_addr;
+	uint32_t hlreg0;
+	uint32_t txctrl;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Enable TX CRC (checksum offload requirement) and hw padding
+	 * (TSO requirement) */
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+	/* Setup the Base and Length of the Tx Descriptor Rings */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		bus_addr = txq->tx_ring_dma_addr;
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
+				(uint32_t)((uint64_t)bus_addr & 0x00000000ffffffffULL));
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
+				(uint32_t)((uint64_t)bus_addr >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
+				txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+		/* Setup the HW Tx Head and TX Tail descriptor pointers */
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+
+		/*
+		 * Disable Tx Head Writeback RO bit, since this hoses
+		 * bookkeeping if things aren't delivered in order.
+		 */
+		switch (hw->mac.type) {
+			case ixgbe_mac_82598EB:
+				txctrl = IXGBE_READ_REG(hw,
+							IXGBE_DCA_TXCTRL(txq->reg_idx));
+				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+						txctrl);
+				break;
+
+			case ixgbe_mac_82599EB:
+			case ixgbe_mac_X540:
+			case ixgbe_mac_X550:
+			case ixgbe_mac_X550EM_x:
+			default:
+				txctrl = IXGBE_READ_REG(hw,
+						IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
+				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+						txctrl);
+				break;
+		}
+	}
+
+	/* Device configured with multiple TX queues. */
+	//ixgbe_dev_mq_tx_configure(dev);
+}
+
+
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ * ... which is always txq->tx_rs_thresh, as freeing occurs blockwise.
+ */
+static inline int ixgbe_tx_free_bufs(ixgbe_tx_queue_t *txq)
+{
+	ixgbe_tx_entry_t *txep;
+	uint32_t status;
+	int i;
+
+	/* check DD bit on threshold descriptor */
+	status = txq->tx_ring[txq->tx_next_dd].wb.status;
+	if (!(status & IXGBE_CPU_TO_LE32(IXGBE_ADVTXD_STAT_DD)))
+		return 0;
+
+	/*
+	 * first segment to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh-1)
+	 */
+	txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+	/* free buffers one at a time */
+	for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+		odp_nic_sgmt_free(txep->sgmt);
+		txep->sgmt = ODP_NIC_SGMT_INVALID;
+	}
+
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
+
+
+/* Populate 4 descriptors with data from 4 segments */
+static inline void
+tx4(volatile union ixgbe_adv_tx_desc *txdp, odp_nic_sgmt_t *sgmts)
+{
+	odp_dma_addr_t sgmt_dma_addr;
+	uint32_t sgmt_len;
+	int i;
+
+	for (i = 0; i < 4; ++i, ++txdp, ++sgmts) {
+		sgmt_dma_addr = odp_nic_sgmt_get_dma_addr(*sgmts);
+		sgmt_len = odp_nic_sgmt_get_datalen(*sgmts);
+
+		/* write data to descriptor */
+		txdp->read.buffer_addr = IXGBE_CPU_TO_LE64(sgmt_dma_addr);
+
+		txdp->read.cmd_type_len =
+		     IXGBE_CPU_TO_LE32((uint32_t)DCMD_DTYP_FLAGS | sgmt_len);
+
+		txdp->read.olinfo_status =
+		     IXGBE_CPU_TO_LE32(sgmt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+		//rte_prefetch0(&(*sgmts)->pool);
+	}
+}
+
+/* Populate 1 descriptor with data from 1 segment */
+static inline void
+tx1(volatile union ixgbe_adv_tx_desc *txdp, odp_nic_sgmt_t *sgmts)
+{
+	odp_dma_addr_t sgmt_dma_addr;
+	uint32_t sgmt_len;
+	sgmt_dma_addr = odp_nic_sgmt_get_dma_addr(*sgmts);
+	sgmt_len = odp_nic_sgmt_get_datalen(*sgmts);
+
+	/* write data to descriptor */
+	txdp->read.buffer_addr = IXGBE_CPU_TO_LE64(sgmt_dma_addr);
+
+	txdp->read.cmd_type_len =
+		IXGBE_CPU_TO_LE32((uint32_t)DCMD_DTYP_FLAGS | sgmt_len);
+
+	txdp->read.olinfo_status =
+		IXGBE_CPU_TO_LE32(sgmt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	//rte_prefetch0(&(*sgmts)->pool);
+
+}
+
+
+/*
+ * Fill H/W descriptor ring with segment data.
+ * Copy segment pointers to the S/W ring.
+ */
+static inline void
+ixgbe_tx_fill_hw_ring(ixgbe_tx_queue_t *txq, odp_nic_sgmt_t *sgmts,
+		      uint16_t nb_sgmts)
+{
+	volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+	ixgbe_tx_entry_t *txep = &(txq->sw_ring[txq->tx_tail]);
+	const int N_PER_LOOP = 4;
+	const int N_PER_LOOP_MASK = N_PER_LOOP-1;
+	int mainpart, leftover;
+	int i, j;
+
+	/*
+	 * Process most of the packets in chunks of N sgmts.  Any
+	 * leftover packets will get processed one at a time.
+	 */
+	mainpart = (nb_sgmts & ((uint32_t) ~N_PER_LOOP_MASK));
+	leftover = (nb_sgmts & ((uint32_t)  N_PER_LOOP_MASK));
+	for (i = 0; i < mainpart; i += N_PER_LOOP) {
+		/* Copy N segment handles to the S/W ring */
+		for (j = 0; j < N_PER_LOOP; ++j) {
+			(txep + i + j)->sgmt = *(sgmts + i + j);
+		}
+		tx4(txdp + i, sgmts + i);
+	}
+
+	if (odp_unlikely(leftover > 0)) {
+		for (i = 0; i < leftover; ++i) {
+			(txep + mainpart + i)->sgmt = *(sgmts + mainpart + i);
+			tx1(txdp + mainpart + i, sgmts + mainpart + i);
+		}
+	}
+}
+
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, odp_nic_sgmt_t *sgmts,
+	     uint16_t nb_sgmts)
+{
+	ixgbe_tx_queue_t *txq = (ixgbe_tx_queue_t *)tx_queue;
+	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
+	uint16_t n = 0;
+
+	/*
+	 * Begin scanning the H/W ring for done descriptors when the
+	 * number of available descriptors drops below tx_free_thresh.  For
+	 * each done descriptor, free the associated buffer.
+	 */
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		ixgbe_tx_free_bufs(txq);
+
+	/* Only use descriptors that are available */
+	nb_sgmts = (uint16_t)min(txq->nb_tx_free, nb_sgmts);
+	if (odp_unlikely(nb_sgmts == 0))
+		return 0;
+
+	/*
+	 * At this point, we know there are enough descriptors in the
+	 * ring to transmit all the packets.  This assumes that each
+	 * mbuf contains a single segment, and that no new offloads
+	 * are expected, which would require a new context descriptor.
+	 */
+
+	/* Use exactly nb_sgmts descriptors */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_sgmts);
+
+	/*
+	 * See if we're going to wrap-around. If so, handle the top
+	 * of the descriptor ring first, then do the bottom.  If not,
+	 * the processing looks just like the "bottom" part anyway...
+	 */
+	if ((txq->tx_tail + nb_sgmts) > txq->nb_tx_desc) {
+		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+		ixgbe_tx_fill_hw_ring(txq, sgmts, n);
+
+		/*
+		 * We know that the last descriptor in the ring will need to
+		 * have its RS bit set because tx_rs_thresh has to be
+		 * a divisor of the ring size
+		 */
+		tx_r[txq->tx_next_rs].read.cmd_type_len |=
+			IXGBE_CPU_TO_LE32(IXGBE_ADVTXD_DCMD_RS);
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		txq->tx_tail = 0;
+	}
+
+	/* Fill H/W descriptor ring with segment data */
+	ixgbe_tx_fill_hw_ring(txq, sgmts + n, (uint16_t)(nb_sgmts - n));
+	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_sgmts - n));
+
+	/*
+	 * Determine if RS bit should be set
+	 * This is what we actually want:
+	 *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
+	 * but instead of subtracting 1 and doing >=, we can just do
+	 * greater than without subtracting.
+	 */
+	if (txq->tx_tail > txq->tx_next_rs) {
+		tx_r[txq->tx_next_rs].read.cmd_type_len |=
+			IXGBE_CPU_TO_LE32(IXGBE_ADVTXD_DCMD_RS);
+		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+						txq->tx_rs_thresh);
+		if (txq->tx_next_rs >= txq->nb_tx_desc)
+			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+	}
+
+	/*
+	 * Check for wrap-around. This would only happen if we used
+	 * up to the last descriptor in the ring, no more, no less.
+	 */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+
+	/* update tail pointer */
+	wmb();
+	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+	return nb_sgmts;
+}
+
+
+uint16_t
+ixgbe_xmit_pkts_simple(void *tx_queue, odp_nic_sgmt_t *sgmts,
+		       uint16_t nb_sgmts)
+{
+	uint16_t nb_tx;
+
+	/* Try to transmit at least chunks of TX_MAX_BURST pkts */
+	if (odp_likely(nb_sgmts <= PMD_IXGBE_TX_MAX_BURST))
+		return tx_xmit_pkts(tx_queue, sgmts, nb_sgmts);
+
+	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
+	nb_tx = 0;
+	while (nb_sgmts) {
+		uint16_t ret, n;
+		n = (uint16_t)min(nb_sgmts, PMD_IXGBE_TX_MAX_BURST);
+		ret = tx_xmit_pkts(tx_queue, &(sgmts[nb_tx]), n);
+		nb_tx = (uint16_t)(nb_tx + ret);
+		nb_sgmts = (uint16_t)(nb_sgmts - ret);
+		if (ret < n)
+			break;
+	}
+
+	return nb_tx;
+}
+
+
+
+/*********************************************************************
+ *
+ *  RX functions
+ *
+ **********************************************************************/
+
+
+
+
+static void
+ixgbe_rss_disable(odp_nic_dev_t *dev)
+{
+	struct ixgbe_hw *hw;
+	uint32_t mrqc;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc &= ~IXGBE_MRQC_RSSEN;
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
+
+
+static void
+ixgbe_reset_rx_queue(ixgbe_adapter_t *adapter ODP_UNUSED, ixgbe_rx_queue_t *rxq)
+{
+	static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
+	unsigned i;
+	uint16_t len = rxq->nb_rx_desc;
+
+
+	/*
+	 * Zero out HW ring memory. Zero out extra memory at the end of
+	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+	 * reads extra memory as zeros.
+	 */
+	for (i = 0; i < len; i++) {
+		rxq->rx_ring[i] = zeroed_desc;
+	}
+
+
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+
+}
+
+
+
+/*
+ * init rx queue
+ */
+
+int ixgbe_dev_rx_queue_setup(odp_nic_dev_t *nic_dev,
+			 odp_pci_dev_t pci_dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 odp_nic_sgmt_pool_t sgmt_pool)
+{
+	odp_dma_map_t sgmts_dma_region;
+	odp_dma_map_t ring_dma_region;
+	ixgbe_rx_queue_t *rxq;
+	struct ixgbe_hw     *hw;
+	uint16_t len;
+	struct ixgbe_adapter_t *adapter =
+		(struct ixgbe_adapter_t *)nic_dev->data->dev_private;
+	char descr_name[ODP_SHM_NAME_LEN];
+
+	PMD_INIT_FUNC_TRACE();
+	hw = &adapter->hw;
+
+	/* First allocate the rx queue data structure */
+	rxq = (ixgbe_rx_queue_t *)malloc(sizeof(ixgbe_rx_queue_t));
+	if (rxq == NULL)
+		return (-1);
+
+	/* DMA map the memory pool from which segments are taken: */
+	sgmts_dma_region = odp_nic_sgmt_pool_dma_map(sgmt_pool);
+	if (odp_pci_map_dma_region(pci_dev, sgmts_dma_region) <0) {
+		//FIXME: ixgbe_rx_queue_release(rxq);
+		return (-1);
+	}
+	rxq->sgmt_pool = sgmt_pool;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = 0; /* do not hold */
+	rxq->queue_id = queue_idx;
+	rxq->port_id = nic_dev->data->port_id;
+	rxq->reg_idx = queue_idx;
+	rxq->crc_len = 0;
+
+	/*
+	 * Allocate RX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	snprintf(descr_name, sizeof(descr_name),
+		 "RX ring descr: %s:%d:%d",
+		 odp_pci_get_addr_str(pci_dev),
+		 rxq->port_id, queue_idx);
+	odp_shm_t shm; //FIXME: save and release
+	shm = odp_shm_reserve(descr_name,
+			      RX_RING_SZ,
+			      ODP_CACHE_LINE_SIZE, ODP_SHM_DMA);
+	ring_dma_region = odp_shm_get_dma_map(shm);
+	if (ring_dma_region == ODP_DMA_REGION_INVALID) {
+		//FIXME: ixgbe_rx_queue_release(rxq);
+		return (-1);
+	}
+	if (odp_pci_map_dma_region(pci_dev, ring_dma_region) <0) {
+		//FIXME: ixgbe_rx_queue_release(rxq);
+		return (-1);
+	}
+
+	/*
+	 * Zero init all the descriptors in the ring.
+	 */
+	memset(odp_dma_map_get_addr(ring_dma_region), 0, RX_RING_SZ);
+
+	rxq->rdt_reg_addr =
+		IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
+	rxq->rdh_reg_addr =
+		IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
+
+	rxq->rx_ring_dma_addr = odp_dma_map_get_dma_addr(ring_dma_region);
+
+	rxq->rx_ring = (union ixgbe_adv_rx_desc *)
+			odp_dma_map_get_addr(ring_dma_region);
+
+
+	/*
+	 * Allocate software ring. Allow for space at the end of the
+	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+	 * function does not access an invalid memory region.
+	 */
+	len = nb_desc;
+	rxq->sw_ring = malloc(sizeof(ixgbe_rx_entry_t) * len);
+
+	if (!rxq->sw_ring) {
+		//ixgbe_rx_queue_release(rxq);
+		return (-1);
+	}
+
+
+	PMD_INIT_LOG_DBG("sw_ring=%p hw_ring=%p "
+			    "dma_addr=0x%"PRIx64,
+		     rxq->sw_ring, rxq->rx_ring,
+		     rxq->rx_ring_dma_addr);
+
+	nic_dev->data->rx_queues[queue_idx] = rxq;
+
+	ixgbe_reset_rx_queue(adapter, rxq);
+
+	return 0;
+}
+
+
+/*
+ * allocate new rx segments to populate initial rx descriptors
+ */
+static int ixgbe_alloc_rx_queue_segmts(ixgbe_rx_queue_t *rxq)
+{
+	ixgbe_rx_entry_t *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	volatile union ixgbe_adv_rx_desc *rxd;
+	odp_nic_sgmt_t segment;
+	unsigned i;
+
+	/* Initialize software ring entries */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		segment = odp_nic_sgmt_alloc(rxq->sgmt_pool);
+		if (segment == ODP_NIC_SGMT_INVALID) {
+			PMD_INIT_LOG_ERR("RX segment alloc failed queue_id=%u",
+				     (unsigned) rxq->queue_id);
+			return (-ENOMEM);
+		}
+
+		dma_addr =
+			IXGBE_CPU_TO_LE64(odp_nic_sgmt_get_dma_addr(segment));
+		rxd = &rxq->rx_ring[i];
+		rxd->read.hdr_addr = 0;
+		rxd->read.pkt_addr = dma_addr;
+		rxe[i].sgmt = segment;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Initializes Receive Unit.
+ */
+int
+ixgbe_dev_rx_init(odp_nic_dev_t *dev)
+{
+	struct ixgbe_hw     *hw;
+	ixgbe_rx_queue_t *rxq;
+	uint64_t bus_addr;
+	uint32_t rxctrl;
+	uint32_t fctrl;
+	uint32_t hlreg0;
+	uint32_t srrctl;
+	uint32_t rdrxctl;
+	uint32_t rxcsum;
+	uint16_t buf_size;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Make sure receives are disabled while setting
+	 * up the RX context (registers, descriptor rings, etc.).
+	 */
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+	/* Enable receipt of broadcasted frames */
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	fctrl |= IXGBE_FCTRL_BAM;
+	fctrl |= IXGBE_FCTRL_DPF;
+	fctrl |= IXGBE_FCTRL_PMCF;
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+	/*
+	 * Configure CRC stripping, if any.
+	 */
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	if (1) //(rx_conf->hw_strip_crc)
+		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
+	else
+		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+
+	hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+
+	hlreg0 &= ~IXGBE_HLREG0_LPBK;
+
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+	/* Setup RX queues */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		/*
+		 * Reset crc_len in case it was changed after queue setup by a
+		 * call to configure.
+		 */
+		//rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+		rxq->crc_len = 0;
+
+		/* Setup the Base and Length of the Rx Descriptor Rings */
+		bus_addr = rxq->rx_ring_dma_addr;
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
+				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
+				(uint32_t)(bus_addr >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
+				rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
+
+		/* Configure the SRRCTL register */
+		srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+		/* Set if packets are dropped when no descriptors available */
+		//if (rxq->drop_en)
+		if (0)
+			srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+		/*
+		 * Configure the RX buffer size in the BSIZEPACKET field of
+		 * the SRRCTL register of the queue.
+		 * The value is in 1 KB resolution. Valid values can be from
+		 * 1 KB to 16 KB.
+		 */
+		odp_nic_sgmt_pool_info_t info;
+		odp_nic_sgmt_pool_info(rxq->sgmt_pool, &info);
+		//FIXME: size must be 1K aligned???
+		buf_size = (uint16_t)(info.nic_max_seg_size);
+		srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+			   IXGBE_SRRCTL_BSIZEPKT_MASK);
+
+		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+
+		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+
+	}
+
+	/*
+	 * Device configured with multiple RX queues.
+	 */
+	ixgbe_rss_disable(dev);
+
+	/*
+	 * Setup the Checksum Register.
+	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+	 * Enable IP/L4 checksum computation by hardware if requested to do so.
+	 */
+	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+	rxcsum |= IXGBE_RXCSUM_PCSD;
+	if (1)//(rx_conf->hw_ip_checksum)
+		rxcsum |= IXGBE_RXCSUM_IPPCSE;
+	else
+		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540) {
+		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+		if (1) //(rx_conf->hw_strip_crc)
+			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+		else
+			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+	}
+
+//	rc = ixgbe_set_rsc(dev);
+//	if (rc)
+//		return rc;
+//
+
+	dev->rx_pkt_burst = ixgbe_recv_pkts;
+
+	return 0;
+}
+
+
+
+/*
+ * Start Transmit and Receive Units.
+ */
+int
+ixgbe_dev_rxtx_start(odp_nic_dev_t *dev)
+{
+	struct ixgbe_hw  *hw;
+	ixgbe_tx_queue_t *txq;
+	uint32_t txdctl;
+	uint32_t dmatxctl;
+	uint32_t rxctrl;
+	uint16_t i;
+	int ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		/* Setup Transmit Threshold Registers */
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+		txdctl |= txq->pthresh & 0x7F;
+		txdctl |= ((txq->hthresh & 0x7F) << 8);
+		txdctl |= ((txq->wthresh & 0x7F) << 16);
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+	}
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+		dmatxctl |= IXGBE_DMATXCTL_TE;
+		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+	//	if (!txq->tx_deferred_start) {
+			ret = ixgbe_dev_tx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+	//	}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		ret = ixgbe_dev_rx_queue_start(dev, i);
+		if (ret < 0)
+			return ret;
+	}
+
+//	/* other initialisation by erachmi: FIXME! */
+//	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+//		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); /* multicast MAC address config and selection (0=none). must be set by SW. index is queue*/
+//		IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), 0); /* disable 5-tuple filtering for this queue*/
+//		IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); /* disable ethertype filter*/
+//	}
+//	for (i = 0; i < 128; i++) { //nb of pools
+//		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); /*vf pool selection: none for the 128 pools NOTE doc says modulo 4, while define is 8!!! */
+//		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); /*vf pool selection: none for the 128 pools */
+//	}
+//	for (i = 0; i < 128; i++) { //i.e. number of possible VLAN IDs/32 bits per register: 1 bit per vlan id
+//		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0); /*all packets with a vlan id are dropped */
+//	}
+//
+//	for (i = 0; i < 63; i++) { //i.e. number vf pool (only 0 used in non VF)
+//		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), 0); /* header split config. index is vf pool, 0 for pf. value 0 for no spit */
+//	}
+//	IXGBE_WRITE_REG(hw, IXGBE_RQTC, 0);    /* RSS: same queue for all TC*/
+//
+	/* Enable Receive engine */
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		rxctrl |= IXGBE_RXCTRL_DMBYPS;
+	rxctrl |= IXGBE_RXCTRL_RXEN;
+	hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+	return 0;
+}
+
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int ixgbe_dev_rx_queue_start(odp_nic_dev_t *nic_dev, uint16_t rx_queue_id)
+{
+	struct ixgbe_hw     *hw;
+	ixgbe_rx_queue_t *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+	struct ixgbe_adapter_t *adapter =
+		(struct ixgbe_adapter_t *)nic_dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = &adapter->hw;
+
+	if (rx_queue_id < nic_dev->data->nb_rx_queues) {
+		rxq = nic_dev->data->rx_queues[rx_queue_id];
+
+		/* Allocate buffers for descriptor rings */
+		if (ixgbe_alloc_rx_queue_segmts(rxq) != 0) {
+			PMD_INIT_LOG_ERR("Could not alloc segment for queue:%d",
+				     rx_queue_id);
+			return -1;
+		}
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		rxdctl |= IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+		/* Wait until RX Enable ready */
+		poll_ms = IXGBE_POLL_WAIT_10_MS;
+		do {
+			DELAY(1000); /* 1ms */
+			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+		if (!poll_ms)
+			PMD_INIT_LOG_ERR("Could not enable Rx Queue %d",
+				     rx_queue_id);
+		wmb();
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+	} else
+		return -1;
+
+	PMD_INIT_LOG_DBG("STARTED Rx Queue %d", rx_queue_id);
+	return 0;
+}
+
+
+uint16_t
+ixgbe_recv_pkts(void *rx_queue, odp_nic_sgmt_t *rx_sgmts,
+		uint16_t nb_sgmts)
+{
+	ixgbe_rx_queue_t *rxq;
+	volatile union ixgbe_adv_rx_desc *rx_ring;
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	ixgbe_rx_entry_t *sw_ring;
+	ixgbe_rx_entry_t *rxe;
+	odp_nic_sgmt_t sgmt;
+	odp_nic_sgmt_t n_sgmt;
+	union ixgbe_adv_rx_desc rxd;
+	uint64_t dma_addr;
+	uint32_t staterr;
+	uint16_t sgmt_len;
+	uint16_t rx_id;
+	uint16_t nb_rx;
+	uint16_t nb_hold;
+
+	nb_rx = 0;
+	nb_hold = 0;
+	rxq = rx_queue;
+	rx_id = rxq->rx_tail;
+	rx_ring = rxq->rx_ring;
+	sw_ring = rxq->sw_ring;
+	while (nb_rx < nb_sgmts) {
+		/*
+		 * The order of operations here is important as the DD status
+		 * bit must not be read after any other descriptor fields.
+		 * rx_ring and rxdp are pointing to volatile data so the order
+		 * of accesses cannot be reordered by the compiler. If they were
+		 * not volatile, they could be reordered which could lead to
+		 * using invalid descriptor fields when read from rxd.
+		 */
+		rxdp = &rx_ring[rx_id];
+		staterr = rxdp->wb.upper.status_error;
+		if (!(staterr & IXGBE_CPU_TO_LE32(IXGBE_RXDADV_STAT_DD)))
+			break;
+		rxd = *rxdp;
+
+		/*
+		 * End of packet.
+		 *
+		 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
+		 * is likely to be invalid and to be dropped by the various
+		 * validation checks performed by the network stack.
+		 *
+		 * Allocate a new sgmt to replenish the RX ring descriptor.
+		 * If the allocation fails:
+		 *    - arrange for that RX descriptor to be the first one
+		 *      being parsed the next time the receive function is
+		 *      invoked [on the same queue].
+		 *
+		 *    - Stop parsing the RX ring and return immediately.
+		 *
+		 * This policy does not drop the packet received in the RX
+		 * descriptor for which the allocation of a new sgmt failed.
+		 * Thus, it allows that packet to be retrieved later if
+		 * sgmts have been freed in the meantime.
+		 * As a side effect, holding RX descriptors instead of
+		 * systematically giving them back to the NIC may lead to
+		 * RX ring exhaustion situations.
+		 * However, the NIC can gracefully prevent such situations
+		 * from happening by sending specific "back-pressure" flow
+		 * control frames to its peer(s).
+		 */
+		PMD_RX_LOG_DBG("port_id=%u queue_id=%u rx_id=%u "
+			   "ext_err_stat=0x%08x sgmt_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) staterr,
+			   (unsigned) IXGBE_LE16_TO_CPU(rxd.wb.upper.length));
+
+		n_sgmt = odp_nic_sgmt_alloc(rxq->sgmt_pool);
+		if (n_sgmt == ODP_NIC_SGMT_INVALID) {
+			PMD_RX_LOG_DBG("RX segment alloc failed port_id=%u "
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
+			//rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		nb_hold++;
+		rxe = &sw_ring[rx_id];
+		rx_id++;
+		if (rx_id == rxq->nb_rx_desc)
+			rx_id = 0;
+
+//		/* Prefetch next sgmt while processing current one. */
+//		rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+//
+//		/*
+//		 * When next RX descriptor is on a cache-line boundary,
+//		 * prefetch the next 4 RX descriptors and the next 8 pointers
+//		 * to sgmt.
+//		 */
+//		if ((rx_id & 0x3) == 0) {
+//			rte_ixgbe_prefetch(&rx_ring[rx_id]);
+//			rte_ixgbe_prefetch(&sw_ring[rx_id]);
+//		}
+
+		sgmt = rxe->sgmt;
+
+		/* give the new successfully allocated segment to the ring: */
+		rxe->sgmt = n_sgmt;
+		dma_addr = IXGBE_CPU_TO_LE64(odp_nic_sgmt_get_dma_addr(n_sgmt));
+		rxdp->read.hdr_addr = 0;
+		rxdp->read.pkt_addr = dma_addr;
+
+		/*
+		 * Initialize the returned segment.
+		 * 1) setup generic segment fields:
+		 *    - number of segments,
+		 *    - next segment,
+		 *    - packet length,
+		 *    - RX port identifier.
+		 * 2) integrate hardware offload data, if any:
+		 *    - RSS flag & hash,
+		 *    - IP checksum flag,
+		 *    - VLAN TCI, if any,
+		 *    - error flags.
+		 */
+		sgmt_len = (uint16_t) (IXGBE_LE16_TO_CPU(rxd.wb.upper.length) -
+				      rxq->crc_len);
+//		rte_packet_prefetch((char *)sgmt->buf_addr + sgmt->data_off);
+		odp_nic_sgmt_set_datalen(sgmt, sgmt_len);
+		odp_nic_sgmt_set_last(sgmt);
+
+//		/* set packet attributes: */
+//		pkt_info = IXGBE_LE32_TO_CPU(rxd.wb.lower.lo_dword.hs_rss.
+//								pkt_info);
+//		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+//		sgmt->vlan_tci = IXGBE_LE16_TO_CPU(rxd.wb.upper.vlan);
+//
+//		pkt_flags = rx_desc_status_to_pkt_flags(staterr) |
+//			    rx_desc_error_to_pkt_flags(staterr) |
+//			    ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+//		sgmt->ol_flags = pkt_flags;
+//		sgmt->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
+//
+//		if (likely(pkt_flags & PKT_RX_RSS_HASH))
+//			sgmt->hash.rss = IXGBE_LE32_TO_CPU(
+//						rxd.wb.lower.hi_dword.rss);
+//		else if (pkt_flags & PKT_RX_FDIR) {
+//			sgmt->hash.fdir.hash = IXGBE_LE16_TO_CPU(
+//					rxd.wb.lower.hi_dword.csum_ip.csum) &
+//					IXGBE_ATR_HASH_MASK;
+//			sgmt->hash.fdir.id = IXGBE_LE16_TO_CPU(
+//					rxd.wb.lower.hi_dword.csum_ip.ip_id);
+//		}
+
+		/*
+		 * Store the segment into the next entry of the array
+		 * of returned segments.
+		 */
+		rx_sgmts[nb_rx++] = sgmt;
+	}
+	rxq->rx_tail = rx_id;
+
+	/*
+	 * If the number of free RX descriptors is greater than the RX free
+	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+	 * register.
+	 * Update the RDT with the value of the last processed RX descriptor
+	 * minus 1, to guarantee that the RDT register is never equal to the
+	 * RDH register, which creates a "full" ring situation from the
+	 * hardware point of view...
+	 * (The HW pointer points to the next to be read)
+	 */
+	//FIXME: why we hold is not clear
+	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+	if (nb_hold > rxq->rx_free_thresh) {
+		PMD_RX_LOG_DBG("port_id=%u queue_id=%u rx_tail=%u "
+			   "nb_hold=%u nb_rx=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
+		rx_id = (uint16_t) ((rx_id == 0) ?
+				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
+		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+		nb_hold = 0;
+	}
+	rxq->nb_rx_hold = nb_hold;
+	return (nb_rx);
+}
diff --git a/drivers/ixgbe/ixgbe_rxtx.h b/drivers/ixgbe/ixgbe_rxtx.h
new file mode 100644
index 0000000..65e13b8
--- /dev/null
+++ b/drivers/ixgbe/ixgbe_rxtx.h
@@ -0,0 +1,225 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_RXTX_H_
+#define _IXGBE_RXTX_H_
+
+#include  <odp_driver.h>
+#include "base/ixgbe_type.h"
+
+#define IXGBE_MAX_RING_DESC 32  //FIXME: should be 4096
+#define IXGBE_MIN_RING_DESC 32
+
+
+/*********************************************************************
+ *
+ *  TX
+ *
+ **********************************************************************/
+#define TX_RING_SZ (sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC)
+
+/* Defines for Tx descriptor */
+#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
+			 IXGBE_ADVTXD_DCMD_IFCS |\
+			 IXGBE_ADVTXD_DCMD_DEXT |\
+			 IXGBE_ADVTXD_DCMD_EOP)
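+/* In DCMD_DTYP_FLAGS above: DTYP_DATA selects the advanced data descriptor
+ * type, IFCS asks the hardware to insert the Ethernet FCS, DEXT enables the
+ * advanced (extended) descriptor format and EOP marks the end of packet. */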
+
+#define PMD_IXGBE_TX_MAX_BURST 32
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+typedef struct {
+	odp_nic_sgmt_t sgmt; /**< segment associated with TX desc, if any. */
+	uint16_t next_id; /**< Index of next descriptor in ring. */
+	uint16_t last_id; /**< Index of last scattered descriptor. */
+} ixgbe_tx_entry_t;
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue,
+ * as used by the (currently disabled) vector PMD.
+ */
+typedef struct {
+	odp_nic_sgmt_t sgmt; /**< segment associated with TX desc, if any. */
+} ixgbe_tx_entry_v_t;
+
+typedef struct ixgbe_txq_ops_t ixgbe_txq_ops_t;
+
+typedef struct {
+	/** TX ring virtual address. */
+	volatile union ixgbe_adv_tx_desc *tx_ring;
+	odp_dma_addr_t            tx_ring_dma_addr; /**< TX ring DMA address. */
+	union {
+		ixgbe_tx_entry_t *sw_ring; /**< address of SW ring for scalar PMD. */
+//		ixgbe_tx_entry_v_t *sw_ring_v; /**< address of SW ring for vector PMD */
+	};
+	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
+	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t            tx_tail;       /**< current value of TDT reg. */
+	/**< Start freeing TX buffers if there are less free descriptors than
+	     this value. */
+	uint16_t            tx_free_thresh;
+	/** Number of TX descriptors to use before RS bit is set. */
+	uint16_t            tx_rs_thresh;
+	/** Number of TX descriptors used since RS bit was set. */
+	uint16_t            nb_tx_used;
+	/** Index to last TX descriptor to have been cleaned. */
+	uint16_t            last_desc_cleaned;
+	/** Total number of TX descriptors ready to be allocated. */
+	uint16_t            nb_tx_free;
+	uint16_t tx_next_dd; /**< next desc to scan for DD bit */
+	uint16_t tx_next_rs; /**< next desc to set RS bit */
+	uint16_t            queue_id;      /**< TX queue index. */
+	uint16_t            reg_idx;       /**< TX queue register index. */
+	uint8_t             port_id;       /**< Device port identifier. */
+	uint8_t             pthresh;       /**< Prefetch threshold register. */
+	uint8_t             hthresh;       /**< Host threshold register. */
+	uint8_t             wthresh;       /**< Write-back threshold reg. */
+	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint32_t            ctx_curr;      /**< Hardware context states. */
+	/** Hardware context0 history. */
+//	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
+	const ixgbe_txq_ops_t *ops;       /**< txq ops */
+//	uint8_t             tx_deferred_start; /**< not in global dev start. */
+} ixgbe_tx_queue_t;
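+
+/*
+ * Note on the TX thresholds above: the RS (Report Status) bit is set on
+ * every tx_rs_thresh-th descriptor so that the hardware writes back the
+ * DD (Descriptor Done) bit for it; once fewer than tx_free_thresh
+ * descriptors remain free, the transmit path scans for DD bits and
+ * recycles the completed descriptors.
+ */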
+
+
+struct ixgbe_txq_ops_t {
+//	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
+//	void (*free_swring)(struct ixgbe_tx_queue *txq);
+	void (*reset)(ixgbe_tx_queue_t *txq);
+};
+
+/*
+ * RX/TX function prototypes
+ */
+int ixgbe_dev_rx_init(odp_nic_dev_t *dev);
+int ixgbe_dev_rxtx_start(odp_nic_dev_t *dev);
+uint16_t ixgbe_recv_pkts(void *rx_queue, odp_nic_sgmt_t *rx_sgmts,
+		uint16_t nb_pkts);
+int ixgbe_dev_rx_queue_setup(odp_nic_dev_t *nic_dev,
+			 odp_pci_dev_t pci_dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 odp_nic_sgmt_pool_t sgmt_pool);
+
+int ixgbe_dev_rx_queue_start(odp_nic_dev_t *dev, uint16_t rx_queue_id);
+
+void ixgbe_dev_tx_init(odp_nic_dev_t *dev);
+int ixgbe_dev_tx_queue_setup(odp_nic_dev_t *nic_dev,
+			 odp_pci_dev_t pci_dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 odp_nic_sgmt_pool_t sgmt_pool);
+
+uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, odp_nic_sgmt_t *sgmts,
+			        uint16_t nb_pkts);
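+
+/*
+ * Rough expected call sequence for the entry points above (a sketch only;
+ * the actual ordering is driven by ixgbe_main.c):
+ *
+ *	ixgbe_dev_rx_queue_setup() / ixgbe_dev_tx_queue_setup()
+ *	ixgbe_dev_rx_init()        / ixgbe_dev_tx_init()
+ *	ixgbe_dev_rxtx_start()
+ *	then ixgbe_recv_pkts() / ixgbe_xmit_pkts_simple() per burst.
+ */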
+
+/*********************************************************************
+ *
+ *  RX
+ *
+ **********************************************************************/
+
+#define RX_RING_SZ (IXGBE_MAX_RING_DESC * sizeof(union ixgbe_adv_rx_desc))
+
+#define IXGBE_POLL_WAIT_10_MS  10
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+typedef struct {
+	/**< segment associated with RX descriptor. */
+	odp_nic_sgmt_t sgmt;
+} ixgbe_rx_entry_t;
+
+/**
+ * Structure associated with each RX queue.
+ */
+typedef struct {
+	/**< nic segment pool to populate RX ring. */
+	odp_nic_sgmt_pool_t  sgmt_pool;
+
+	/**< RX hw descriptor ring virtual address. */
+	volatile union ixgbe_adv_rx_desc *rx_ring;
+
+	/**< RX hw descriptor ring DMA address. */
+	odp_dma_addr_t rx_ring_dma_addr;
+
+	/**< Receive Descriptor Tail (RDT) register address. */
+	volatile uint32_t   *rdt_reg_addr;
+	/**< Receive Descriptor Head (RDH) register address. */
+	volatile uint32_t   *rdh_reg_addr;
+	/**< address of RX software ring. */
+	ixgbe_rx_entry_t *sw_ring;
+
+	//struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
+	//struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+	//struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+	//uint64_t            mbuf_initializer; /**< value to init mbufs */
+
+	/**< number of RX descriptors in this queue/ring. */
+	uint16_t            nb_rx_desc;
+	/**< current value of RDT register. */
+	uint16_t            rx_tail;
+	/**< number of held free RX desc. */ //FIXME: why do we hold free ring entries?
+	uint16_t            nb_rx_hold;
+	/**< max free RX desc to hold. */ //FIXME: why do we hold free ring entries?
+	uint16_t            rx_free_thresh;
+
+	//uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+	//uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+	//uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+	//uint16_t            rx_using_sse;
+	/**< indicates that vector RX is in use */
+//#ifdef RTE_IXGBE_INC_VECTOR
+	//uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
+	//uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
+//#endif
+
+	/**< RX queue index. */ //FIXME: should be removed or replaced with something more relevant?
+	uint16_t            queue_id;
+	/**< Device port identifier. */ //FIXME: should be removed or replaced with something more relevant?
+	uint8_t             port_id;
+
+	/**< 0 if CRC stripped, 4 otherwise. */
+	uint8_t             crc_len;
+
+	uint16_t            reg_idx;  /**< RX queue register index. */
+	//uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
+	//uint8_t             rx_deferred_start; /**< not in global dev start. */
+	///** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+	//struct rte_mbuf fake_mbuf;
+	///** hold packets to return to application */
+	//struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
+} ixgbe_rx_queue_t;
+
+#endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/ixgbe/ixgbe_supported_dev.h b/drivers/ixgbe/ixgbe_supported_dev.h
new file mode 100644
index 0000000..a7e7898
--- /dev/null
+++ b/drivers/ixgbe/ixgbe_supported_dev.h
@@ -0,0 +1,119 @@ 
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * List of PCI devices supported by the ixgbe driver
+ */
+
+#ifndef _IXGBE_SUPPORTED_DEV_H_
+#define _IXGBE_SUPPORTED_DEV_H_
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+
+typedef struct pci_id_t {
+	uint16_t vendor_id;           /**< Vendor ID or PCI_ANY_ID. */
+	uint16_t device_id;           /**< Device ID or PCI_ANY_ID. */
+	uint16_t subsystem_vendor_id; /**< Subsystem vendor ID or PCI_ANY_ID. */
+	uint16_t subsystem_device_id; /**< Subsystem device ID or PCI_ANY_ID. */
+} pci_id_t;
+#define PCI_ANY_ID (0xffff)
+#define SUPPORTED_DEVICE(vend, dev) {\
+	(vend),                   \
+	(dev),                    \
+	PCI_ANY_ID,               \
+	PCI_ANY_ID}
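+
+/*
+ * For example, SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)
+ * expands to the initializer {0x8086, 0x10FB, PCI_ANY_ID, PCI_ANY_ID},
+ * i.e. any subsystem vendor/device IDs are accepted for that device.
+ */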
+
+#define IXGBE_DEV_ID_82598                      0x10B6
+#define IXGBE_DEV_ID_82598_BX                   0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT          0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT        0x10C7
+#define IXGBE_DEV_ID_82598AT                    0x10C8
+#define IXGBE_DEV_ID_82598AT2                   0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM            0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4                0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT        0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT         0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR              0x10F4
+#define IXGBE_DEV_ID_82599_KX4                  0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ             0x1514
+#define IXGBE_DEV_ID_82599_KR                   0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE      0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ       0x000C
+#define IXGBE_DEV_ID_82599_CX4                  0x10F9
+#define IXGBE_DEV_ID_82599_SFP                  0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP               0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC              0x1F72
+#define IXGBE_SUBDEV_ID_82599_560FLR            0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP           0x0470
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE             0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM               0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2              0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP            0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP           0x1558
+#define IXGBE_DEV_ID_82599EN_SFP                0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM             0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM               0x151C
+#define IXGBE_DEV_ID_82599_LS                   0x154F
+#define IXGBE_DEV_ID_X540T                      0x1528
+#define IXGBE_DEV_ID_X540T1                     0x1560
+#define IXGBE_DEV_ID_X550EM_X_SFP               0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T             0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T              0x15AE
+#define IXGBE_DEV_ID_X550T                      0x1563
+#define IXGBE_DEV_ID_X550EM_X_KX4               0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR                0x15AB
+
+static const pci_id_t pci_id_ixgbe_map[] = {
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_RNDC),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_560FLR),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_ECNA_DP),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_LS),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T1),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_SFP),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_10G_T),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_1G_T),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550T),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KX4),
+SUPPORTED_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KR),
+SUPPORTED_DEVICE(0, 0)
+};
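+
+/*
+ * Illustration only (not part of this patch): a probe function could match a
+ * discovered device against the table as follows, relying on the all-zero
+ * sentinel entry to terminate the walk. The function name is hypothetical.
+ *
+ *	static int ixgbe_is_supported(uint16_t vendor, uint16_t device)
+ *	{
+ *		const pci_id_t *id;
+ *
+ *		for (id = pci_id_ixgbe_map; id->vendor_id != 0; id++)
+ *			if (id->vendor_id == vendor &&
+ *			    id->device_id == device)
+ *				return 1;
+ *		return 0;
+ *	}
+ */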
+
+#endif /* _IXGBE_SUPPORTED_DEV_H_ */