diff --git a/MAINTAINERS b/MAINTAINERS
index d5d36cedf0b9716fd63ab34120c1f38e338def0e..a0245fd1b09a68d9a010438954603ec9d4710b04 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6023,6 +6023,12 @@ L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/dma/fsldma.*
 
+FREESCALE ENETC ETHERNET DRIVERS
+M:	Claudiu Manoil <claudiu.manoil@nxp.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/freescale/enetc/
+
 FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
 M:	Claudiu Manoil <claudiu.manoil@nxp.com>
 L:	netdev@vger.kernel.org
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index d3a62bc1f1c680c626dd786addee425c65de9ae5..71793e03c3c8e3f62086d63c42f8b3745dbf71b8 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -97,5 +97,6 @@ config GIANFAR
 
 source "drivers/net/ethernet/freescale/dpaa/Kconfig"
 source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
+source "drivers/net/ethernet/freescale/enetc/Kconfig"
 
 endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3b4ff08e3841ce06db72a6ae65741003d861f50b..6a93293d31e0328f5057c946a7b969793238ed8a 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -23,3 +23,6 @@ obj-$(CONFIG_FSL_FMAN) += fman/
 obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
 
 obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+
+obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..f9dd26fbfc83e14b7901cc6d900fb19433945f2c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC
+	tristate "ENETC PF driver"
+	depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+	help
+	  This driver supports NXP ENETC gigabit ethernet controller PCIe
+	  physical function (PF) devices, managing ENETC Ports at a privileged
+	  level.
+
+	  If compiled as a module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+	tristate "ENETC VF driver"
+	depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+	help
+	  This driver supports NXP ENETC gigabit ethernet controller PCIe
+	  virtual function (VF) devices enabled by the ENETC PF driver.
+
+	  If compiled as a module (M), the module name is fsl-enetc-vf.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..a85ef2b475fe3361ce4b0012ca4db272007a9637
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o
+fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+
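+# If both drivers are built in (yy), the shared objects are already linked
+# into the kernel via fsl-enetc, so fsl-enetc-vf carries only enetc_vf.o.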
+ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
+fsl-enetc-vf-objs := enetc_vf.o
+else
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
+				       enetc_ethtool.o
+fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
+endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 0000000000000000000000000000000000000000..f63be02cffae0c6e9eae2df588c7618b2785a5a5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/of_mdio.h>
+
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS	13
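+/* worst case: 13 fragment BDs + 1 head BD, plus the extension BD and gap
+ * accounted for by ENETC_TXBDS_NEEDED(): 16 BDs total
+ */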
+#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_bdr *tx_ring;
+	int count;
+
+	tx_ring = priv->tx_ring[skb->queue_mapping];
+
+	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+		if (unlikely(skb_linearize(skb)))
+			goto drop_packet_err;
+
+	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+		netif_stop_subqueue(ndev, tx_ring->index);
+		return NETDEV_TX_BUSY;
+	}
+
+	count = enetc_map_tx_buffs(tx_ring, skb);
+	if (unlikely(!count))
+		goto drop_packet_err;
+
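+	/* stop the queue pre-emptively if a worst-case frame would no longer
+	 * fit; enetc_clean_tx_ring() re-wakes it once enough BDs are freed
+	 */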
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+		netif_stop_subqueue(ndev, tx_ring->index);
+
+	return NETDEV_TX_OK;
+
+drop_packet_err:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
+{
+	int l3_start, l3_hsize;
+	u16 l3_flags, l4_flags;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+	switch (skb->csum_offset) {
+	case offsetof(struct tcphdr, check):
+		l4_flags = ENETC_TXBD_L4_TCP;
+		break;
+	case offsetof(struct udphdr, check):
+		l4_flags = ENETC_TXBD_L4_UDP;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return false;
+	}
+
+	l3_start = skb_network_offset(skb);
+	l3_hsize = skb_network_header_len(skb);
+
+	l3_flags = 0;
+	if (skb->protocol == htons(ETH_P_IPV6))
+		l3_flags = ENETC_TXBD_L3_IPV6;
+
+	/* write BD fields */
+	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
+	txbd->l4_csoff = l4_flags;
+
+	return true;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+				struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->is_dma_page)
+		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+			       tx_swbd->len, DMA_TO_DEVICE);
+	else
+		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
+				 tx_swbd->len, DMA_TO_DEVICE);
+	tx_swbd->dma = 0;
+}
+
+static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
+			      struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->dma)
+		enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+	if (tx_swbd->skb) {
+		dev_kfree_skb_any(tx_swbd->skb);
+		tx_swbd->skb = NULL;
+	}
+}
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+	struct enetc_tx_swbd *tx_swbd;
+	struct skb_frag_struct *frag;
+	int len = skb_headlen(skb);
+	union enetc_tx_bd temp_bd;
+	union enetc_tx_bd *txbd;
+	bool do_vlan, do_tstamp;
+	int i, count = 0;
+	unsigned int f;
+	dma_addr_t dma;
+	u8 flags = 0;
+
+	i = tx_ring->next_to_use;
+	txbd = ENETC_TXBD(*tx_ring, i);
+	prefetchw(txbd);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+		goto dma_err;
+
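+	/* each BD is composed in the temp_bd scratch area and only copied
+	 * into the ring once all of its fields are filled in
+	 */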
+	temp_bd.addr = cpu_to_le64(dma);
+	temp_bd.buf_len = cpu_to_le16(len);
+	temp_bd.lstatus = 0;
+
+	tx_swbd = &tx_ring->tx_swbd[i];
+	tx_swbd->dma = dma;
+	tx_swbd->len = len;
+	tx_swbd->is_dma_page = 0;
+	count++;
+
+	do_vlan = skb_vlan_tag_present(skb);
+	do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+
+	if (do_vlan || do_tstamp)
+		flags |= ENETC_TXBD_FLAGS_EX;
+
+	if (enetc_tx_csum(skb, &temp_bd))
+		flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+
+	/* first BD needs frm_len and offload flags set */
+	temp_bd.frm_len = cpu_to_le16(skb->len);
+	temp_bd.flags = flags;
+
+	if (flags & ENETC_TXBD_FLAGS_EX) {
+		u8 e_flags = 0;
+		*txbd = temp_bd;
+		enetc_clear_tx_bd(&temp_bd);
+
+		/* add extension BD for VLAN and/or timestamping */
+		flags = 0;
+		tx_swbd++;
+		txbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+			txbd = ENETC_TXBD(*tx_ring, 0);
+		}
+		prefetchw(txbd);
+
+		if (do_vlan) {
+			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+			temp_bd.ext.tpid = 0; /* standard C-TAG */
+			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+		}
+
+		if (do_tstamp) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
+		}
+
+		temp_bd.ext.e_flags = e_flags;
+		count++;
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+		len = skb_frag_size(frag);
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_err;
+
+		*txbd = temp_bd;
+		enetc_clear_tx_bd(&temp_bd);
+
+		flags = 0;
+		tx_swbd++;
+		txbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+			txbd = ENETC_TXBD(*tx_ring, 0);
+		}
+		prefetchw(txbd);
+
+		temp_bd.addr = cpu_to_le64(dma);
+		temp_bd.buf_len = cpu_to_le16(len);
+
+		tx_swbd->dma = dma;
+		tx_swbd->len = len;
+		tx_swbd->is_dma_page = 1;
+		count++;
+	}
+
+	/* last BD needs 'F' bit set */
+	flags |= ENETC_TXBD_FLAGS_F;
+	temp_bd.flags = flags;
+	*txbd = temp_bd;
+
+	tx_ring->tx_swbd[i].skb = skb;
+
+	enetc_bdr_idx_inc(tx_ring, &i);
+	tx_ring->next_to_use = i;
+
+	/* let H/W know BD ring has been updated */
+	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+
+	return count;
+
+dma_err:
+	dev_err(tx_ring->dev, "DMA map error\n");
+
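+	/* roll back: walk the ring backwards from the current slot and
+	 * release every buffer mapped so far
+	 */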
+	do {
+		tx_swbd = &tx_ring->tx_swbd[i];
+		enetc_free_tx_skb(tx_ring, tx_swbd);
+		if (i == 0)
+			i = tx_ring->bd_count;
+		i--;
+	} while (count--);
+
+	return 0;
+}
+
+static irqreturn_t enetc_msix(int irq, void *data)
+{
+	struct enetc_int_vector	*v = data;
+	int i;
+
+	/* disable interrupts */
+	enetc_wr_reg(v->rbier, 0);
+
+	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+	napi_schedule_irqoff(&v->napi);
+
+	return IRQ_HANDLED;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+			       struct napi_struct *napi, int work_limit);
+
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+	struct enetc_int_vector
+		*v = container_of(napi, struct enetc_int_vector, napi);
+	bool complete = true;
+	int work_done;
+	int i;
+
+	for (i = 0; i < v->count_tx_rings; i++)
+		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+			complete = false;
+
+	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
+	if (work_done == budget)
+		complete = false;
+
+	if (!complete)
+		return budget;
+
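+	/* all rings fully serviced within budget: leave polling mode and
+	 * re-enable the per-ring interrupts
+	 */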
+	napi_complete_done(napi, work_done);
+
+	/* enable interrupts */
+	enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+
+	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
+			     ENETC_TBIER_TXTIE);
+
+	return work_done;
+}
+
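+/* number of Tx BDs the h/w has completed: the ring distance from the
+ * s/w clean index (ci) to the h/w consumer index read from TBCIR
+ */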
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+{
+	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+
+	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+{
+	struct net_device *ndev = tx_ring->ndev;
+	int tx_frm_cnt = 0, tx_byte_cnt = 0;
+	struct enetc_tx_swbd *tx_swbd;
+	int i, bds_to_clean;
+
+	i = tx_ring->next_to_clean;
+	tx_swbd = &tx_ring->tx_swbd[i];
+	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
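+		/* the skb pointer is set only on a frame's last BD (see
+		 * enetc_map_tx_buffs()), so it doubles as an end-of-frame
+		 * marker
+		 */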
+		bool is_eof = !!tx_swbd->skb;
+
+		enetc_unmap_tx_buff(tx_ring, tx_swbd);
+		if (is_eof) {
+			napi_consume_skb(tx_swbd->skb, napi_budget);
+			tx_swbd->skb = NULL;
+		}
+
+		tx_byte_cnt += tx_swbd->len;
+
+		bds_to_clean--;
+		tx_swbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+		}
+
+		/* BD iteration loop end */
+		if (is_eof) {
+			tx_frm_cnt++;
+			/* re-arm interrupt source */
+			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
+				     BIT(16 + tx_ring->index));
+		}
+
+		if (unlikely(!bds_to_clean))
+			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+	}
+
+	tx_ring->next_to_clean = i;
+	tx_ring->stats.packets += tx_frm_cnt;
+	tx_ring->stats.bytes += tx_byte_cnt;
+
+	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
+		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+		netif_wake_subqueue(ndev, tx_ring->index);
+	}
+
+	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
+}
+
+static bool enetc_new_page(struct enetc_bdr *rx_ring,
+			   struct enetc_rx_swbd *rx_swbd)
+{
+	struct page *page;
+	dma_addr_t addr;
+
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
+
+	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
+		__free_page(page);
+
+		return false;
+	}
+
+	rx_swbd->dma = addr;
+	rx_swbd->page = page;
+	rx_swbd->page_offset = ENETC_RXB_PAD;
+
+	return true;
+}
+
+static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+{
+	struct enetc_rx_swbd *rx_swbd;
+	union enetc_rx_bd *rxbd;
+	int i, j;
+
+	i = rx_ring->next_to_use;
+	rx_swbd = &rx_ring->rx_swbd[i];
+	rxbd = ENETC_RXBD(*rx_ring, i);
+
+	for (j = 0; j < buff_cnt; j++) {
+		/* try reuse page */
+		if (unlikely(!rx_swbd->page)) {
+			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
+				rx_ring->stats.rx_alloc_errs++;
+				break;
+			}
+		}
+
+		/* update RxBD */
+		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
+					   rx_swbd->page_offset);
+		/* clear 'R' as well */
+		rxbd->r.lstatus = 0;
+
+		rx_swbd++;
+		rxbd++;
+		i++;
+		if (unlikely(i == rx_ring->bd_count)) {
+			i = 0;
+			rx_swbd = rx_ring->rx_swbd;
+			rxbd = ENETC_RXBD(*rx_ring, 0);
+		}
+	}
+
+	if (likely(j)) {
+		rx_ring->next_to_alloc = i; /* keep track for page reuse */
+		rx_ring->next_to_use = i;
+		/* update ENETC's consumer index */
+		enetc_wr_reg(rx_ring->rcir, i);
+	}
+
+	return j;
+}
+
+static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
+{
+	/* TODO: add tstamp, hashing */
+	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
+		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
+
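+		/* the BD carries the frame checksum in folded (inverted)
+		 * form; un-invert it to get the raw sum that
+		 * CHECKSUM_COMPLETE expects
+		 */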
+		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+
+	/* if the h/w extracted a VLAN tag, copy it to the skb; for now we
+	 * assume the standard TPID, but the h/w also supports custom values
+	 */
+	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       le16_to_cpu(rxbd->r.vlan_opt));
+}
+
+static void enetc_process_skb(struct enetc_bdr *rx_ring,
+			      struct sk_buff *skb)
+{
+	skb_record_rx_queue(skb, rx_ring->index);
+	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+}
+
+static bool enetc_page_reusable(struct page *page)
+{
+	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
+}
+
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+			     struct enetc_rx_swbd *old)
+{
+	struct enetc_rx_swbd *new;
+
+	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+	/* next buf that may reuse a page */
+	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+	/* copy page reference */
+	*new = *old;
+}
+
+static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
+					       int i, u16 size)
+{
+	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
+				      rx_swbd->page_offset,
+				      size, DMA_FROM_DEVICE);
+	return rx_swbd;
+}
+
+static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
+			      struct enetc_rx_swbd *rx_swbd)
+{
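+	/* Rx pages are used as two half-page buffers; if we hold the only
+	 * page reference, flip page_offset to the other half, take a new
+	 * reference for the buffer just handed to the stack, and recycle
+	 * the page into the ring instead of unmapping it
+	 */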
+	if (likely(enetc_page_reusable(rx_swbd->page))) {
+		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
+		page_ref_inc(rx_swbd->page);
+
+		enetc_reuse_page(rx_ring, rx_swbd);
+
+		/* sync for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+						 rx_swbd->page_offset,
+						 ENETC_RXB_DMA_SIZE,
+						 DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	rx_swbd->page = NULL;
+}
+
+static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
+						int i, u16 size)
+{
+	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+	struct sk_buff *skb;
+	void *ba;
+
+	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
+	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+	if (unlikely(!skb)) {
+		rx_ring->stats.rx_alloc_errs++;
+		return NULL;
+	}
+
+	skb_reserve(skb, ENETC_RXB_PAD);
+	__skb_put(skb, size);
+
+	enetc_put_rx_buff(rx_ring, rx_swbd);
+
+	return skb;
+}
+
+static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
+				     u16 size, struct sk_buff *skb)
+{
+	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
+			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
+
+	enetc_put_rx_buff(rx_ring, rx_swbd);
+}
+
+#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
+
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+			       struct napi_struct *napi, int work_limit)
+{
+	int rx_frm_cnt = 0, rx_byte_cnt = 0;
+	int cleaned_cnt, i;
+
+	cleaned_cnt = enetc_bd_unused(rx_ring);
+	/* next descriptor to process */
+	i = rx_ring->next_to_clean;
+
+	while (likely(rx_frm_cnt < work_limit)) {
+		union enetc_rx_bd *rxbd;
+		struct sk_buff *skb;
+		u32 bd_status;
+		u16 size;
+
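+		/* refill in bundles, amortizing the consumer index write
+		 * done by enetc_refill_rx_ring()
+		 */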
+		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+
+			cleaned_cnt -= count;
+		}
+
+		rxbd = ENETC_RXBD(*rx_ring, i);
+		bd_status = le32_to_cpu(rxbd->r.lstatus);
+		if (!bd_status)
+			break;
+
+		enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+		dma_rmb(); /* for reading other rxbd fields */
+		size = le16_to_cpu(rxbd->r.buf_len);
+		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+		if (!skb)
+			break;
+
+		enetc_get_offloads(rx_ring, rxbd, skb);
+
+		cleaned_cnt++;
+		rxbd++;
+		i++;
+		if (unlikely(i == rx_ring->bd_count)) {
+			i = 0;
+			rxbd = ENETC_RXBD(*rx_ring, 0);
+		}
+
+		if (unlikely(bd_status &
+			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+			dev_kfree_skb(skb);
+			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+				dma_rmb();
+				bd_status = le32_to_cpu(rxbd->r.lstatus);
+				rxbd++;
+				i++;
+				if (unlikely(i == rx_ring->bd_count)) {
+					i = 0;
+					rxbd = ENETC_RXBD(*rx_ring, 0);
+				}
+			}
+
+			rx_ring->ndev->stats.rx_dropped++;
+			rx_ring->ndev->stats.rx_errors++;
+
+			break;
+		}
+
+		/* not last BD in frame? */
+		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+			bd_status = le32_to_cpu(rxbd->r.lstatus);
+			size = ENETC_RXB_DMA_SIZE;
+
+			if (bd_status & ENETC_RXBD_LSTATUS_F) {
+				dma_rmb();
+				size = le16_to_cpu(rxbd->r.buf_len);
+			}
+
+			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+
+			cleaned_cnt++;
+			rxbd++;
+			i++;
+			if (unlikely(i == rx_ring->bd_count)) {
+				i = 0;
+				rxbd = ENETC_RXBD(*rx_ring, 0);
+			}
+		}
+
+		rx_byte_cnt += skb->len;
+
+		enetc_process_skb(rx_ring, skb);
+
+		napi_gro_receive(napi, skb);
+
+		rx_frm_cnt++;
+	}
+
+	rx_ring->next_to_clean = i;
+
+	rx_ring->stats.packets += rx_frm_cnt;
+	rx_ring->stats.bytes += rx_byte_cnt;
+
+	return rx_frm_cnt;
+}
+
+/* Probing and Init */
+void enetc_get_si_caps(struct enetc_si *si)
+{
+	struct enetc_hw *hw = &si->hw;
+	u32 val;
+
+	/* find out how many of various resources we have to work with */
+	val = enetc_rd(hw, ENETC_SICAPR0);
+	si->num_rx_rings = (val >> 16) & 0xff;
+	si->num_tx_rings = val & 0xff;
+}
+
+static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+{
+	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
+					&r->bd_dma_base, GFP_KERNEL);
+	if (!r->bd_base)
+		return -ENOMEM;
+
+	/* h/w requires 128B alignment */
+	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
+		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
+				  r->bd_dma_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+{
+	int err;
+
+	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
+	if (!txr->tx_swbd)
+		return -ENOMEM;
+
+	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+	if (err) {
+		vfree(txr->tx_swbd);
+		return err;
+	}
+
+	txr->next_to_clean = 0;
+	txr->next_to_use = 0;
+
+	return 0;
+}
+
+static void enetc_free_txbdr(struct enetc_bdr *txr)
+{
+	int size, i;
+
+	for (i = 0; i < txr->bd_count; i++)
+		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+
+	size = txr->bd_count * sizeof(union enetc_tx_bd);
+
+	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
+	txr->bd_base = NULL;
+
+	vfree(txr->tx_swbd);
+	txr->tx_swbd = NULL;
+}
+
+static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+{
+	int i, err;
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		err = enetc_alloc_txbdr(priv->tx_ring[i]);
+
+		if (err)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (i-- > 0)
+		enetc_free_txbdr(priv->tx_ring[i]);
+
+	return err;
+}
+
+static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_free_txbdr(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+{
+	int err;
+
+	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
+	if (!rxr->rx_swbd)
+		return -ENOMEM;
+
+	err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+	if (err) {
+		vfree(rxr->rx_swbd);
+		return err;
+	}
+
+	rxr->next_to_clean = 0;
+	rxr->next_to_use = 0;
+	rxr->next_to_alloc = 0;
+
+	return 0;
+}
+
+static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+{
+	int size;
+
+	size = rxr->bd_count * sizeof(union enetc_rx_bd);
+
+	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
+	rxr->bd_base = NULL;
+
+	vfree(rxr->rx_swbd);
+	rxr->rx_swbd = NULL;
+}
+
+static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+{
+	int i, err;
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+
+		if (err)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (i-- > 0)
+		enetc_free_rxbdr(priv->rx_ring[i]);
+
+	return err;
+}
+
+static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_free_rxbdr(priv->rx_ring[i]);
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+	int i;
+
+	if (!tx_ring->tx_swbd)
+		return;
+
+	for (i = 0; i < tx_ring->bd_count; i++) {
+		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+		enetc_free_tx_skb(tx_ring, tx_swbd);
+	}
+
+	tx_ring->next_to_clean = 0;
+	tx_ring->next_to_use = 0;
+}
+
+static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
+{
+	int i;
+
+	if (!rx_ring->rx_swbd)
+		return;
+
+	for (i = 0; i < rx_ring->bd_count; i++) {
+		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+		if (!rx_swbd->page)
+			continue;
+
+		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rx_swbd->page);
+		rx_swbd->page = NULL;
+	}
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->next_to_alloc = 0;
+}
+
+static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_free_rx_ring(priv->rx_ring[i]);
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_free_tx_ring(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+					   GFP_KERNEL);
+	if (!cbdr->bd_base)
+		return -ENOMEM;
+
+	/* h/w requires 128B alignment */
+	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+		return -EINVAL;
+	}
+
+	cbdr->next_to_clean = 0;
+	cbdr->next_to_use = 0;
+
+	return 0;
+}
+
+static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+	cbdr->bd_base = NULL;
+}
+
+static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
+{
+	/* set CBDR cache attributes */
+	enetc_wr(hw, ENETC_SICAR2,
+		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+	enetc_wr(hw, ENETC_SICBDRPIR, 0);
+	enetc_wr(hw, ENETC_SICBDRCIR, 0);
+
+	/* enable ring */
+	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
+
+	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+}
+
+static void enetc_clear_cbdr(struct enetc_hw *hw)
+{
+	enetc_wr(hw, ENETC_SICBDRMR, 0);
+}
+
+static int enetc_configure_si(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	struct enetc_hw *hw = &si->hw;
+
+	enetc_setup_cbdr(hw, &si->cbd_ring);
+	/* set SI cache attributes */
+	enetc_wr(hw, ENETC_SICAR0,
+		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+	/* enable SI */
+	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+
+	return 0;
+}
+
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	int cpus = num_online_cpus();
+
+	priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+	priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+
+	/* Enable all available Tx rings so that as many traffic priorities
+	 * as possible can be configured when needed.
+	 * TODO: Make # of Tx rings run-time configurable
+	 */
+	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
+	priv->num_tx_rings = si->num_tx_rings;
+	priv->bdr_int_num = cpus;
+
+	/* SI specific */
+	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
+}
+
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	int err;
+
+	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
+	if (err)
+		return err;
+
+	err = enetc_configure_si(priv);
+	if (err)
+		goto err_config_si;
+
+	return 0;
+
+err_config_si:
+	enetc_clear_cbdr(&si->hw);
+	enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+	return err;
+}
+
+void enetc_free_si_resources(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+
+	enetc_clear_cbdr(&si->hw);
+	enetc_free_cbdr(priv->dev, &si->cbd_ring);
+}
+
+static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+	int idx = tx_ring->index;
+	u32 tbmr;
+
+	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
+		       lower_32_bits(tx_ring->bd_dma_base));
+
+	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
+		       upper_32_bits(tx_ring->bd_dma_base));
+
+	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
+	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
+		       ENETC_RTBLENR_LEN(tx_ring->bd_count));
+
+	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
+	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
+	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
+
+	/* enable Tx ints by setting pkt thr to 1 */
+	enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
+
+	tbmr = ENETC_TBMR_EN;
+	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		tbmr |= ENETC_TBMR_VIH;
+
+	/* enable ring */
+	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+
+	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
+	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
+	tx_ring->idr = hw->reg + ENETC_SITXIDR;
+}
+
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+	int idx = rx_ring->index;
+	u32 rbmr;
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
+		       lower_32_bits(rx_ring->bd_dma_base));
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
+		       upper_32_bits(rx_ring->bd_dma_base));
+
+	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
+		       ENETC_RTBLENR_LEN(rx_ring->bd_count));
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+
+	/* enable Rx ints by setting pkt thr to 1 */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
+
+	rbmr = ENETC_RBMR_EN;
+	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		rbmr |= ENETC_RBMR_VTE;
+
+	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
+	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+
+	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+
+	/* enable ring */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+}
+
+static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+	int idx = rx_ring->index;
+
+	/* disable EN bit on ring */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
+}
+
+static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+	int delay = 8, timeout = 100;
+	int idx = tx_ring->index;
+
+	/* disable EN bit on ring */
+	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+
+	/* wait for busy to clear */
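+	/* (backoff doubles: 8, 16, 32, 64 ms, ~120 ms worst case) */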
+	while (delay < timeout &&
+	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
+		msleep(delay);
+		delay *= 2;
+	}
+
+	if (delay >= timeout)
+		netdev_warn(tx_ring->ndev,
+			    "timeout waiting for tx ring #%d to clear\n", idx);
+}
+
+static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+
+	udelay(1);
+}
+
+static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+{
+	struct pci_dev *pdev = priv->si->pdev;
+	cpumask_t cpu_mask;
+	int i, j, err;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+		struct enetc_int_vector *v = priv->int_vector[i];
+		int entry = ENETC_BDR_INT_BASE_IDX + i;
+		struct enetc_hw *hw = &priv->si->hw;
+
+		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
+			 priv->ndev->name, i);
+		err = request_irq(irq, enetc_msix, 0, v->name, v);
+		if (err) {
+			dev_err(priv->dev, "request_irq() failed!\n");
+			goto irq_err;
+		}
+
+		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
+		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+
+		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
+
+		for (j = 0; j < v->count_tx_rings; j++) {
+			int idx = v->tx_ring[j].index;
+
+			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
+		}
+		cpumask_clear(&cpu_mask);
+		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+		irq_set_affinity_hint(irq, &cpu_mask);
+	}
+
+	return 0;
+
+irq_err:
+	while (i--) {
+		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+		irq_set_affinity_hint(irq, NULL);
+		free_irq(irq, priv->int_vector[i]);
+	}
+
+	return err;
+}
+
+static void enetc_free_irqs(struct enetc_ndev_priv *priv)
+{
+	struct pci_dev *pdev = priv->si->pdev;
+	int i;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+		irq_set_affinity_hint(irq, NULL);
+		free_irq(irq, priv->int_vector[i]);
+	}
+}
+
+static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	/* enable Tx & Rx event indication */
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		enetc_rxbdr_wr(&priv->si->hw, i,
+			       ENETC_RBIER, ENETC_RBIER_RXTIE);
+	}
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		enetc_txbdr_wr(&priv->si->hw, i,
+			       ENETC_TBIER, ENETC_TBIER_TXTIE);
+	}
+}
+
+static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+}
+
+static void adjust_link(struct net_device *ndev)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	phy_print_status(phydev);
+}
+
+static int enetc_phy_connect(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct phy_device *phydev;
+
+	if (!priv->phy_node)
+		return 0; /* phy-less mode */
+
+	phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
+				0, priv->if_mode);
+	if (!phydev) {
+		dev_err(&ndev->dev, "could not attach to PHY\n");
+		return -ENODEV;
+	}
+
+	phy_attached_info(phydev);
+
+	return 0;
+}
+
+int enetc_open(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	int i, err;
+
+	err = enetc_setup_irqs(priv);
+	if (err)
+		return err;
+
+	err = enetc_phy_connect(ndev);
+	if (err)
+		goto err_phy_connect;
+
+	err = enetc_alloc_tx_resources(priv);
+	if (err)
+		goto err_alloc_tx;
+
+	err = enetc_alloc_rx_resources(priv);
+	if (err)
+		goto err_alloc_rx;
+
+	enetc_setup_bdrs(priv);
+
+	err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
+	if (err)
+		goto err_set_queues;
+
+	for (i = 0; i < priv->bdr_int_num; i++)
+		napi_enable(&priv->int_vector[i]->napi);
+
+	enetc_enable_interrupts(priv);
+
+	if (ndev->phydev)
+		phy_start(ndev->phydev);
+	else
+		netif_carrier_on(ndev);
+
+	netif_tx_start_all_queues(ndev);
+
+	return 0;
+
+err_set_queues:
+	enetc_free_rx_resources(priv);
+err_alloc_rx:
+	enetc_free_tx_resources(priv);
+err_alloc_tx:
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
+err_phy_connect:
+	enetc_free_irqs(priv);
+
+	return err;
+}
+
+int enetc_close(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	int i;
+
+	netif_tx_stop_all_queues(ndev);
+
+	if (ndev->phydev) {
+		phy_stop(ndev->phydev);
+		phy_disconnect(ndev->phydev);
+	} else {
+		netif_carrier_off(ndev);
+	}
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		napi_synchronize(&priv->int_vector[i]->napi);
+		napi_disable(&priv->int_vector[i]->napi);
+	}
+
+	enetc_disable_interrupts(priv);
+	enetc_clear_bdrs(priv);
+
+	enetc_free_rxtx_rings(priv);
+	enetc_free_rx_resources(priv);
+	enetc_free_tx_resources(priv);
+	enetc_free_irqs(priv);
+
+	return 0;
+}
+
+struct net_device_stats *enetc_get_stats(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	unsigned long packets = 0, bytes = 0;
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		packets += priv->rx_ring[i]->stats.packets;
+		bytes	+= priv->rx_ring[i]->stats.bytes;
+	}
+
+	stats->rx_packets = packets;
+	stats->rx_bytes = bytes;
+	bytes = 0;
+	packets = 0;
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		packets += priv->tx_ring[i]->stats.packets;
+		bytes	+= priv->tx_ring[i]->stats.bytes;
+	}
+
+	stats->tx_packets = packets;
+	stats->tx_bytes = bytes;
+
+	return stats;
+}
+
+int enetc_alloc_msix(struct enetc_ndev_priv *priv)
+{
+	struct pci_dev *pdev = priv->si->pdev;
+	int size, v_tx_rings;
+	int i, n, err, nvec;
+
+	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
+	/* allocate MSIX for both messaging and Rx/Tx interrupts */
+	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+
+	if (n < 0)
+		return n;
+
+	if (n != nvec)
+		return -EPERM;
+
+	/* # of tx rings per int vector */
+	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+	size = sizeof(struct enetc_int_vector) +
+	       sizeof(struct enetc_bdr) * v_tx_rings;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		struct enetc_int_vector *v;
+		struct enetc_bdr *bdr;
+		int j;
+
+		v = kzalloc(size, GFP_KERNEL);
+		if (!v) {
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		priv->int_vector[i] = v;
+
+		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
+			       NAPI_POLL_WEIGHT);
+		v->count_tx_rings = v_tx_rings;
+
+		for (j = 0; j < v_tx_rings; j++) {
+			int idx;
+
+			/* default tx ring mapping policy */
+			if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
+				idx = 2 * j + i; /* 2 CPUs */
+			else
+				idx = j + i * v_tx_rings; /* default */
+
+			__set_bit(idx, &v->tx_rings_map);
+			bdr = &v->tx_ring[j];
+			bdr->index = idx;
+			bdr->ndev = priv->ndev;
+			bdr->dev = priv->dev;
+			bdr->bd_count = priv->tx_bd_count;
+			priv->tx_ring[idx] = bdr;
+		}
+
+		bdr = &v->rx_ring;
+		bdr->index = i;
+		bdr->ndev = priv->ndev;
+		bdr->dev = priv->dev;
+		bdr->bd_count = priv->rx_bd_count;
+		priv->rx_ring[i] = bdr;
+	}
+
+	return 0;
+
+fail:
+	while (i--) {
+		netif_napi_del(&priv->int_vector[i]->napi);
+		kfree(priv->int_vector[i]);
+	}
+
+	pci_free_irq_vectors(pdev);
+
+	return err;
+}
+
+void enetc_free_msix(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		struct enetc_int_vector *v = priv->int_vector[i];
+
+		netif_napi_del(&v->napi);
+	}
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		priv->rx_ring[i] = NULL;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		priv->tx_ring[i] = NULL;
+
+	for (i = 0; i < priv->bdr_int_num; i++) {
+		kfree(priv->int_vector[i]);
+		priv->int_vector[i] = NULL;
+	}
+
+	/* disable all MSIX for this device */
+	pci_free_irq_vectors(priv->si->pdev);
+}
+
+static void enetc_kfree_si(struct enetc_si *si)
+{
+	char *p = (char *)si - si->pad;
+
+	kfree(p);
+}
+
+static void enetc_detect_errata(struct enetc_si *si)
+{
+	if (si->pdev->revision == ENETC_REV1)
+		si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
+			     ENETC_ERR_UCMCSWP;
+}
+
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
+{
+	struct enetc_si *si, *p;
+	struct enetc_hw *hw;
+	size_t alloc_size;
+	int err, len;
+
+	pcie_flr(pdev);
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "device enable failed\n");
+		return err;
+	}
+
+	/* set up for high or low dma */
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"DMA configuration failed: 0x%x\n", err);
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_mem_regions(pdev, name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_mem_regions failed err=%d\n", err);
+		goto err_pci_mem_reg;
+	}
+
+	pci_set_master(pdev);
+
+	alloc_size = sizeof(struct enetc_si);
+	if (sizeof_priv) {
+		/* align priv to 32B */
+		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
+		alloc_size += sizeof_priv;
+	}
+	/* force 32B alignment for enetc_si */
+	alloc_size += ENETC_SI_ALIGN - 1;
+
+	p = kzalloc(alloc_size, GFP_KERNEL);
+	if (!p) {
+		err = -ENOMEM;
+		goto err_alloc_si;
+	}
+
+	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
+	si->pad = (char *)si - (char *)p;
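+	/* si->pad records the alignment offset so that enetc_kfree_si()
+	 * can recover the original allocation pointer
+	 */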
+
+	pci_set_drvdata(pdev, si);
+	si->pdev = pdev;
+	hw = &si->hw;
+
+	len = pci_resource_len(pdev, ENETC_BAR_REGS);
+	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+	if (!hw->reg) {
+		err = -ENXIO;
+		dev_err(&pdev->dev, "ioremap() failed\n");
+		goto err_ioremap;
+	}
+	if (len > ENETC_PORT_BASE)
+		hw->port = hw->reg + ENETC_PORT_BASE;
+	if (len > ENETC_GLOBAL_BASE)
+		hw->global = hw->reg + ENETC_GLOBAL_BASE;
+
+	enetc_detect_errata(si);
+
+	return 0;
+
+err_ioremap:
+	enetc_kfree_si(si);
+err_alloc_si:
+	pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+void enetc_pci_remove(struct pci_dev *pdev)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_hw *hw = &si->hw;
+
+	iounmap(hw->reg);
+	enetc_kfree_si(si);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
new file mode 100644
index 0000000000000000000000000000000000000000..d19bb434bca3c733836e8a8e310d13d67c92a0b9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+#include "enetc_hw.h"
+
+#define ENETC_MAC_MAXFRM_SIZE	9600
+#define ENETC_MAX_MTU		(ENETC_MAC_MAXFRM_SIZE - \
+				(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+
+struct enetc_tx_swbd {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	u16 len;
+	u16 is_dma_page;
+};
+
+#define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE
+#define ENETC_RXB_TRUESIZE	2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
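+/* usable DMA length: half-page truesize minus skb_shared_info overhead
+ * and the reserved headroom
+ */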
+#define ENETC_RXB_DMA_SIZE	\
+	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+
+struct enetc_rx_swbd {
+	dma_addr_t dma;
+	struct page *page;
+	u16 page_offset;
+};
+
+struct enetc_ring_stats {
+	unsigned int packets;
+	unsigned int bytes;
+	unsigned int rx_alloc_errs;
+};
+
+#define ENETC_BDR_DEFAULT_SIZE	1024
+#define ENETC_DEFAULT_TX_WORK	256
+
+struct enetc_bdr {
+	struct device *dev; /* for DMA mapping */
+	struct net_device *ndev;
+	void *bd_base; /* points to Rx or Tx BD ring */
+	union {
+		void __iomem *tpir;
+		void __iomem *rcir;
+	};
+	u16 index;
+	int bd_count; /* # of BDs */
+	int next_to_use;
+	int next_to_clean;
+	union {
+		struct enetc_tx_swbd *tx_swbd;
+		struct enetc_rx_swbd *rx_swbd;
+	};
+	union {
+		void __iomem *tcir; /* Tx */
+		int next_to_alloc; /* Rx */
+	};
+	void __iomem *idr; /* Interrupt Detect Register pointer */
+
+	struct enetc_ring_stats stats;
+
+	dma_addr_t bd_dma_base;
+} ____cacheline_aligned_in_smp;
+
+static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
+{
+	if (unlikely(++*i == bdr->bd_count))
+		*i = 0;
+}
+
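+/* free BDs in the ring; one slot is always kept unused so that a full
+ * ring can be told apart from an empty one
+ */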
+static inline int enetc_bd_unused(struct enetc_bdr *bdr)
+{
+	if (bdr->next_to_clean > bdr->next_to_use)
+		return bdr->next_to_clean - bdr->next_to_use - 1;
+
+	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
+}
+
+/* Control BD ring */
+#define ENETC_CBDR_DEFAULT_SIZE	64
+struct enetc_cbdr {
+	void *bd_base; /* points to Rx or Tx BD ring */
+	void __iomem *pir;
+	void __iomem *cir;
+
+	int bd_count; /* # of BDs */
+	int next_to_use;
+	int next_to_clean;
+
+	dma_addr_t bd_dma_base;
+};
+
+#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
+#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+#define ENETC_REV1	0x1
+enum enetc_errata {
+	ENETC_ERR_TXCSUM	= BIT(0),
+	ENETC_ERR_VLAN_ISOL	= BIT(1),
+	ENETC_ERR_UCMCSWP	= BIT(2),
+};
+
+/* PCI IEP device data */
+struct enetc_si {
+	struct pci_dev *pdev;
+	struct enetc_hw hw;
+	enum enetc_errata errata;
+
+	struct net_device *ndev; /* back ref. */
+
+	struct enetc_cbdr cbd_ring;
+
+	int num_rx_rings; /* how many rings are available in the SI */
+	int num_tx_rings;
+	unsigned short pad;
+};
+
+#define ENETC_SI_ALIGN	32
+
+static inline void *enetc_si_priv(const struct enetc_si *si)
+{
+	return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
+}
+
+static inline bool enetc_si_is_pf(struct enetc_si *si)
+{
+	return !!(si->hw.port);
+}
+
+#define ENETC_MAX_NUM_TXQS	8
+#define ENETC_INT_NAME_MAX	(IFNAMSIZ + 8)
+
+struct enetc_int_vector {
+	void __iomem *rbier;
+	void __iomem *tbier_base;
+	unsigned long tx_rings_map;
+	int count_tx_rings;
+	struct napi_struct napi;
+	char name[ENETC_INT_NAME_MAX];
+
+	struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
+	struct enetc_bdr tx_ring[0];
+};
+
+#define ENETC_MAX_BDR_INT	2 /* fixed to max # of available cpus */
+
+struct enetc_ndev_priv {
+	struct net_device *ndev;
+	struct device *dev; /* dma-mapping device */
+	struct enetc_si *si;
+
+	int bdr_int_num; /* number of Rx/Tx ring interrupts */
+	struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
+	u16 num_rx_rings, num_tx_rings;
+	u16 rx_bd_count, tx_bd_count;
+
+	u16 msg_enable;
+
+	struct enetc_bdr *tx_ring[16];
+	struct enetc_bdr *rx_ring[16];
+
+	struct device_node *phy_node;
+	phy_interface_t if_mode;
+};
+
+#define ENETC_CBD(R, i)	(&(((struct enetc_cbd *)((R).bd_base))[i]))
+
+#define ENETC_CBDR_TIMEOUT	1000 /* usecs */
+
+/* SI common */
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
+void enetc_pci_remove(struct pci_dev *pdev);
+int enetc_alloc_msix(struct enetc_ndev_priv *priv);
+void enetc_free_msix(struct enetc_ndev_priv *priv);
+void enetc_get_si_caps(struct enetc_si *si);
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
+void enetc_free_si_resources(struct enetc_ndev_priv *priv);
+
+int enetc_open(struct net_device *ndev);
+int enetc_close(struct net_device *ndev);
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
+struct net_device_stats *enetc_get_stats(struct net_device *ndev);
+/* ethtool */
+void enetc_set_ethtool_ops(struct net_device *ndev);
+
+/* control buffer descriptor ring (CBDR) */
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+			    char *mac_addr, int si_map);
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
new file mode 100644
index 0000000000000000000000000000000000000000..2ec0f2f316be05f4f624025e6f3332ca5f69a437
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+static void enetc_clean_cbdr(struct enetc_si *si)
+{
+	struct enetc_cbdr *ring = &si->cbd_ring;
+	struct enetc_cbd *dest_cbd;
+	int i, status;
+
+	i = ring->next_to_clean;
+
+	while (enetc_rd_reg(ring->cir) != i) {
+		dest_cbd = ENETC_CBD(*ring, i);
+		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
+		if (status)
+			dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
+				 status, dest_cbd->cmd);
+
+		memset(dest_cbd, 0, sizeof(*dest_cbd));
+
+		i = (i + 1) % ring->bd_count;
+	}
+
+	ring->next_to_clean = i;
+}
+
+static int enetc_cbd_unused(struct enetc_cbdr *r)
+{
+	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
+		r->bd_count;
+}
+
+static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+{
+	struct enetc_cbdr *ring = &si->cbd_ring;
+	int timeout = ENETC_CBDR_TIMEOUT;
+	struct enetc_cbd *dest_cbd;
+	int i;
+
+	if (unlikely(!ring->bd_base))
+		return -EIO;
+
+	if (unlikely(!enetc_cbd_unused(ring)))
+		enetc_clean_cbdr(si);
+
+	i = ring->next_to_use;
+	dest_cbd = ENETC_CBD(*ring, i);
+
+	/* copy command to the ring */
+	*dest_cbd = *cbd;
+	i = (i + 1) % ring->bd_count;
+
+	ring->next_to_use = i;
+	/* let H/W know BD ring has been updated */
+	enetc_wr_reg(ring->pir, i);
+
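+	/* busy-wait for the h/w consumer index to reach the BD just
+	 * submitted, up to ENETC_CBDR_TIMEOUT microseconds
+	 */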
+	do {
+		if (enetc_rd_reg(ring->cir) == i)
+			break;
+		udelay(10); /* cannot sleep, rtnl_lock() */
+		timeout -= 10;
+	} while (timeout);
+
+	if (!timeout)
+		return -EBUSY;
+
+	enetc_clean_cbdr(si);
+
+	return 0;
+}
+
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
+{
+	struct enetc_cbd cbd;
+
+	memset(&cbd, 0, sizeof(cbd));
+
+	cbd.cls = 1;
+	cbd.status_flags = ENETC_CBD_FLAGS_SF;
+	cbd.index = cpu_to_le16(index);
+
+	return enetc_send_cmd(si, &cbd);
+}
+
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+			    char *mac_addr, int si_map)
+{
+	struct enetc_cbd cbd;
+	u32 upper;
+	u16 lower;
+
+	memset(&cbd, 0, sizeof(cbd));
+
+	/* fill up the "set" descriptor */
+	cbd.cls = 1;
+	cbd.status_flags = ENETC_CBD_FLAGS_SF;
+	cbd.index = cpu_to_le16(index);
+	cbd.opt[3] = cpu_to_le32(si_map);
+	/* enable entry */
+	cbd.opt[0] = cpu_to_le32(BIT(31));
+
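+	/* the 6-byte MAC address is split across two words: first 4 bytes
+	 * in addr[0], last 2 bytes in addr[1]
+	 */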
+	upper = *(const u32 *)mac_addr;
+	lower = *(const u16 *)(mac_addr + 4);
+	cbd.addr[0] = cpu_to_le32(upper);
+	cbd.addr[1] = cpu_to_le32(lower);
+
+	return enetc_send_cmd(si, &cbd);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
new file mode 100644
index 0000000000000000000000000000000000000000..41771edada9776d05575c03080279397c99128bd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/net_tstamp.h>
+#include <linux/module.h>
+#include "enetc.h"
+
+static const u32 enetc_si_regs[] = {
+	ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR,
+	ENETC_SICBDRSR,	ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR,
+	ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1,
+	ENETC_SIUEFDCR
+};
+
+static const u32 enetc_txbdr_regs[] = {
+	ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
+	ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER
+};
+
+static const u32 enetc_rxbdr_regs[] = {
+	ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
+	ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER
+};
+
+static const u32 enetc_port_regs[] = {
+	ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0),
+	ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1,
+	ENETC_PSICFGR0(0), ENETC_PTCMSDUR(0), ENETC_PM0_CMD_CFG,
+	ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
+};
+
+static int enetc_get_reglen(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	int len;
+
+	len = ARRAY_SIZE(enetc_si_regs);
+	len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings;
+	len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings;
+
+	if (hw->port)
+		len += ARRAY_SIZE(enetc_port_regs);
+
+	len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
+
+	return len;
+}
+
+static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+			   void *regbuf)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	u32 *buf = (u32 *)regbuf;
+	int i, j;
+	u32 addr;
+
+	for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) {
+		*buf++ = enetc_si_regs[i];
+		*buf++ = enetc_rd(hw, enetc_si_regs[i]);
+	}
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) {
+			addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]);
+
+			*buf++ = addr;
+			*buf++ = enetc_rd(hw, addr);
+		}
+	}
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) {
+			addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]);
+
+			*buf++ = addr;
+			*buf++ = enetc_rd(hw, addr);
+		}
+	}
+
+	if (!hw->port)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) {
+		addr = ENETC_PORT_BASE + enetc_port_regs[i];
+		*buf++ = addr;
+		*buf++ = enetc_rd(hw, addr);
+	}
+}
+
+static void enetc_get_ringparam(struct net_device *ndev,
+				struct ethtool_ringparam *ring)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	ring->rx_pending = priv->rx_bd_count;
+	ring->tx_pending = priv->tx_bd_count;
+
+	/* do some h/w sanity checks for BDR length */
+	if (netif_running(ndev)) {
+		struct enetc_hw *hw = &priv->si->hw;
+		u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR);
+
+		if (val != priv->rx_bd_count)
+			netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val);
+
+		val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR);
+
+		if (val != priv->tx_bd_count)
+			netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val);
+	}
+}
+
+static const struct ethtool_ops enetc_pf_ethtool_ops = {
+	.get_regs_len = enetc_get_reglen,
+	.get_regs = enetc_get_regs,
+	.get_ringparam = enetc_get_ringparam,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct ethtool_ops enetc_vf_ethtool_ops = {
+	.get_regs_len = enetc_get_reglen,
+	.get_regs = enetc_get_regs,
+	.get_ringparam = enetc_get_ringparam,
+};
+
+void enetc_set_ethtool_ops(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	if (enetc_si_is_pf(priv->si))
+		ndev->ethtool_ops = &enetc_pf_ethtool_ops;
+	else
+		ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
new file mode 100644
index 0000000000000000000000000000000000000000..351af09f7cab432689887ede1b35e8ca88a48f74
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/bitops.h>
+
+/* ENETC device IDs */
+#define ENETC_DEV_ID_PF	0xe100
+#define ENETC_DEV_ID_VF	0xef00
+
+/* ENETC register block BAR */
+#define ENETC_BAR_REGS	0
+
+/** SI regs, offset: 0h */
+#define ENETC_SIMR	0
+#define ENETC_SIMR_EN	BIT(31)
+#define ENETC_SICTR0	0x18
+#define ENETC_SICTR1	0x1c
+#define ENETC_SIPCAPR0	0x20
+#define ENETC_SIPCAPR1	0x24
+#define ENETC_SITGTGR	0x30
+#define ENETC_SIRBGCR	0x38
+/* cache attribute registers for transactions initiated by ENETC */
+#define ENETC_SICAR0	0x40
+#define ENETC_SICAR1	0x44
+#define ENETC_SICAR2	0x48
+/* rd snoop, no alloc
+ * wr snoop, no alloc, partial cache line update for BDs and full cache line
+ * update for data
+ */
+#define ENETC_SICAR_RD_COHERENT	0x2b2b0000
+#define ENETC_SICAR_WR_COHERENT	0x00006727
+#define ENETC_SICAR_MSI	0x00300030 /* rd/wr device, no snoop, no alloc */
+
+#define ENETC_SIPMAR0	0x80
+#define ENETC_SIPMAR1	0x84
+
+/* Control BDR regs */
+#define ENETC_SICBDRMR		0x800
+#define ENETC_SICBDRSR		0x804	/* RO */
+#define ENETC_SICBDRBAR0	0x810
+#define ENETC_SICBDRBAR1	0x814
+#define ENETC_SICBDRPIR		0x818
+#define ENETC_SICBDRCIR		0x81c
+#define ENETC_SICBDRLENR	0x820
+
+#define ENETC_SICAPR0	0x900
+#define ENETC_SICAPR1	0x904
+
+#define ENETC_PSIIER	0xa00
+#define ENETC_PSIIER_MR_MASK	GENMASK(2, 1)
+#define ENETC_PSIIDR	0xa08
+#define ENETC_SITXIDR	0xa18
+#define ENETC_SIRXIDR	0xa28
+#define ENETC_SIMSIVR	0xa30
+
+#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
+#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
+
+#define ENETC_SIUEFDCR	0xe28
+
+/** SI BDR sub-blocks, n = 0..7 */
+enum enetc_bdr_type {TX, RX};
+#define ENETC_BDR_OFF(i)	((i) * 0x200)
+#define ENETC_BDR(t, i, r)	(0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
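+/* each ring index owns a 0x200 block starting at 0x8000, with Tx
+ * registers at the block base and Rx registers at base + 0x100
+ */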
+/* RX BDR reg offsets */
+#define ENETC_RBMR	0
+#define ENETC_RBMR_BDS	BIT(2)
+#define ENETC_RBMR_VTE	BIT(5)
+#define ENETC_RBMR_EN	BIT(31)
+#define ENETC_RBSR	0x4
+#define ENETC_RBBSR	0x8
+#define ENETC_RBCIR	0xc
+#define ENETC_RBBAR0	0x10
+#define ENETC_RBBAR1	0x14
+#define ENETC_RBPIR	0x18
+#define ENETC_RBLENR	0x20
+#define ENETC_RBIER	0xa0
+#define ENETC_RBIER_RXTIE	BIT(0)
+#define ENETC_RBIDR	0xa4
+#define ENETC_RBICIR0	0xa8
+#define ENETC_RBICIR0_ICEN	BIT(31)
+
+/* TX BDR reg offsets */
+#define ENETC_TBMR	0
+#define ENETC_TBSR_BUSY	BIT(0)
+#define ENETC_TBMR_VIH	BIT(9)
+#define ENETC_TBMR_PRIO_MASK		GENMASK(2, 0)
+#define ENETC_TBMR_PRIO_SET(val)	val
+#define ENETC_TBMR_EN	BIT(31)
+#define ENETC_TBSR	0x4
+#define ENETC_TBBAR0	0x10
+#define ENETC_TBBAR1	0x14
+#define ENETC_TBPIR	0x18
+#define ENETC_TBCIR	0x1c
+#define ENETC_TBCIR_IDX_MASK	0xffff
+#define ENETC_TBLENR	0x20
+#define ENETC_TBIER	0xa0
+#define ENETC_TBIER_TXTIE	BIT(0)
+#define ENETC_TBIDR	0xa4
+#define ENETC_TBICIR0	0xa8
+#define ENETC_TBICIR0_ICEN	BIT(31)
+
+#define ENETC_RTBLENR_LEN(n)	((n) & ~0x7)
+
+/* Port regs, offset: 1_0000h */
+#define ENETC_PORT_BASE		0x10000
+#define ENETC_PMR		0x0000
+#define ENETC_PMR_EN	GENMASK(18, 16)
+#define ENETC_PSR		0x0004 /* RO */
+#define ENETC_PSIPMR		0x0018
+#define ENETC_PSIPMR_SET_UP(n)	BIT(n) /* n = SI index */
+#define ENETC_PSIPMR_SET_MP(n)	BIT((n) + 16)
+#define ENETC_PSIPVMR		0x001c
+#define ENETC_VLAN_PROMISC_MAP_ALL	0x7
+#define ENETC_PSIPVMR_SET_VP(simap)	((simap) & 0x7)
+#define ENETC_PSIPVMR_SET_VUTA(simap)	(((simap) & 0x7) << 16)
+#define ENETC_PSIPMAR0(n)	(0x0100 + (n) * 0x8) /* n = SI index */
+#define ENETC_PSIPMAR1(n)	(0x0104 + (n) * 0x8)
+#define ENETC_PVCLCTR		0x0208
+#define ENETC_VLAN_TYPE_C	BIT(0)
+#define ENETC_VLAN_TYPE_S	BIT(1)
+#define ENETC_PVCLCTR_OVTPIDL(bmp)	((bmp) & 0xff) /* VLAN_TYPE */
+#define ENETC_PSIVLANR(n)	(0x0240 + (n) * 4) /* n = SI index */
+#define ENETC_PSIVLAN_EN	BIT(31)
+#define ENETC_PSIVLAN_SET_QOS(val)	((u32)(val) << 12)
+#define ENETC_PTXMBAR		0x0608
+#define ENETC_PCAPR0		0x0900
+#define ENETC_PCAPR0_RXBDR(val)	((val) >> 24)
+#define ENETC_PCAPR0_TXBDR(val)	(((val) >> 16) & 0xff)
+#define ENETC_PCAPR1		0x0904
+#define ENETC_PSICFGR0(n)	(0x0940 + (n) * 0xc)  /* n = SI index */
+#define ENETC_PSICFGR0_SET_TXBDR(val)	((val) & 0xff)
+#define ENETC_PSICFGR0_SET_RXBDR(val)	(((val) & 0xff) << 16)
+#define ENETC_PSICFGR0_VTE	BIT(12)
+#define ENETC_PSICFGR0_SIVIE	BIT(14)
+#define ENETC_PSICFGR0_ASE	BIT(15)
+#define ENETC_PSICFGR0_SIVC(bmp)	(((bmp) & 0xff) << 24) /* VLAN_TYPE */
+
+#define ENETC_PTCCBSR0(n)	(0x1110 + (n) * 8) /* n = 0 to 7 */
+#define ENETC_PTCCBSR1(n)	(0x1114 + (n) * 8) /* n = 0 to 7 */
+#define ENETC_PSIVLANFMR	0x1700
+#define ENETC_PSIVLANFMR_VS	BIT(0)
+#define ENETC_PFPMR		0x1900
+#define ENETC_PFPMR_PMACE	BIT(1)
+#define ENETC_PFPMR_MWLM	BIT(0)
+#define ENETC_PSIUMHFR0(n, err)	(((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
+#define ENETC_PSIUMHFR1(n)	(0x1d04 + (n) * 0x10)
+#define ENETC_PSIMMHFR0(n, err)	(((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
+#define ENETC_PSIMMHFR1(n)	(0x1d0c + (n) * 0x10)
+#define ENETC_PSIVHFR0(n)	(0x1e00 + (n) * 8) /* n = SI index */
+#define ENETC_PSIVHFR1(n)	(0x1e04 + (n) * 8) /* n = SI index */
+#define ENETC_MMCSR		0x1f00
+#define ENETC_MMCSR_ME		BIT(16)
+#define ENETC_PTCMSDUR(n)	(0x2020 + (n) * 4) /* n = TC index [0..7] */
+
+#define ENETC_PM0_CMD_CFG	0x8008
+#define ENETC_PM1_CMD_CFG	0x9008
+#define ENETC_PM0_TX_EN		BIT(0)
+#define ENETC_PM0_RX_EN		BIT(1)
+#define ENETC_PM0_PROMISC	BIT(4)
+#define ENETC_PM0_CMD_XGLP	BIT(10)
+#define ENETC_PM0_CMD_TXP	BIT(11)
+#define ENETC_PM0_CMD_PHY_TX_EN	BIT(15)
+#define ENETC_PM0_CMD_SFD	BIT(21)
+#define ENETC_PM0_MAXFRM	0x8014
+#define ENETC_SET_TX_MTU(val)	((val) << 16)
+#define ENETC_SET_MAXFRM(val)	((val) & 0xffff)
+#define ENETC_PM0_IF_MODE	0x8300
+#define ENETC_PMO_IFM_RG	BIT(2)
+#define ENETC_PM0_IFM_RLP	(BIT(5) | BIT(11))
+#define ENETC_PM0_IFM_RGAUTO	(BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
+#define ENETC_PM0_IFM_XGMII	BIT(12)
+
+/* Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE	0x20000
+#define ENETC_G_EIPBRR0		0x0bf8
+#define ENETC_G_EIPBRR1		0x0bfc
+#define ENETC_G_EPFBLPR(n)	(0xd00 + 4 * (n))
+#define ENETC_G_EPFBLPR1_XGMII	0x80000000
+
+/* MMIO register blocks of the ENETC PCI function */
+struct enetc_hw {
+	/* SI registers, used by all PCI functions */
+	void __iomem *reg;
+	/* Port registers, PF only */
+	void __iomem *port;
+	/* IP global registers, PF only */
+	void __iomem *global;
+};
+
+/* general register accessors */
+#define enetc_rd_reg(reg)	ioread32((reg))
+#define enetc_wr_reg(reg, val)	iowrite32((val), (reg))
+
+#define enetc_rd(hw, off)		enetc_rd_reg((hw)->reg + (off))
+#define enetc_wr(hw, off, val)		enetc_wr_reg((hw)->reg + (off), val)
+/* port register accessors - PF only */
+#define enetc_port_rd(hw, off)		enetc_rd_reg((hw)->port + (off))
+#define enetc_port_wr(hw, off, val)	enetc_wr_reg((hw)->port + (off), val)
+/* global register accessors - PF only */
+#define enetc_global_rd(hw, off)	enetc_rd_reg((hw)->global + (off))
+#define enetc_global_wr(hw, off, val)	enetc_wr_reg((hw)->global + (off), val)
+/* BDR register accessors, see ENETC_BDR() */
+#define enetc_bdr_rd(hw, t, n, off) \
+				enetc_rd(hw, ENETC_BDR(t, n, off))
+#define enetc_bdr_wr(hw, t, n, off, val) \
+				enetc_wr(hw, ENETC_BDR(t, n, off), val)
+#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off)
+#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off)
+#define enetc_txbdr_wr(hw, n, off, val) \
+				enetc_bdr_wr(hw, TX, n, off, val)
+#define enetc_rxbdr_wr(hw, n, off, val) \
+				enetc_bdr_wr(hw, RX, n, off, val)
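+/* usage example: enetc_txbdr_rd(hw, 0, ENETC_TBPIR) reads the producer
+ * index of Tx ring 0
+ */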
+
+/* Buffer Descriptors (BD) */
+union enetc_tx_bd {
+	struct {
+		__le64 addr;
+		__le16 buf_len;
+		__le16 frm_len;
+		union {
+			struct {
+				__le16 l3_csoff;
+				u8 l4_csoff;
+				u8 flags;
+			}; /* default layout */
+			__le32 lstatus;
+		};
+	};
+	struct {
+		__le32 tstamp;
+		__le16 tpid;
+		__le16 vid;
+		u8 reserved[6];
+		u8 e_flags;
+		u8 flags;
+	} ext; /* Tx BD extension */
+};
+
+#define ENETC_TXBD_FLAGS_L4CS	BIT(0)
+#define ENETC_TXBD_FLAGS_W	BIT(2)
+#define ENETC_TXBD_FLAGS_CSUM	BIT(3)
+#define ENETC_TXBD_FLAGS_EX	BIT(6)
+#define ENETC_TXBD_FLAGS_F	BIT(7)
+
+static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
+{
+	memset(txbd, 0, sizeof(*txbd));
+}
+
+/* L3 csum flags */
+#define ENETC_TXBD_L3_IPCS	BIT(7)
+#define ENETC_TXBD_L3_IPV6	BIT(15)
+
+#define ENETC_TXBD_L3_START_MASK	GENMASK(6, 0)
+#define ENETC_TXBD_L3_SET_HSIZE(val)	((((val) >> 2) & 0x7f) << 8)
+
+/* Extension flags */
+#define ENETC_TXBD_E_FLAGS_VLAN_INS	BIT(0)
+#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP	BIT(2)
+
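+/* build the Tx BD l3_csoff field: L3 header start in bits 0-6, L3
+ * header size in 32-bit words in bits 8-14, plus ENETC_TXBD_L3_* flags
+ */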
+static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
+{
+	return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
+			   (start & ENETC_TXBD_L3_START_MASK));
+}
+
+/* L4 csum flags */
+#define ENETC_TXBD_L4_UDP	BIT(5)
+#define ENETC_TXBD_L4_TCP	BIT(6)
+
+union enetc_rx_bd {
+	struct {
+		__le64 addr;
+		u8 reserved[8];
+	} w;
+	struct {
+		__le16 inet_csum;
+		__le16 parse_summary;
+		__le32 rss_hash;
+		__le16 buf_len;
+		__le16 vlan_opt;
+		union {
+			struct {
+				__le16 flags;
+				__le16 error;
+			};
+			__le32 lstatus;
+		};
+	} r;
+};
+
+#define ENETC_RXBD_LSTATUS_R	BIT(30)
+#define ENETC_RXBD_LSTATUS_F	BIT(31)
+#define ENETC_RXBD_ERR_MASK	0xff
+#define ENETC_RXBD_LSTATUS(flags)	((flags) << 16)
+#define ENETC_RXBD_FLAG_VLAN	BIT(9)
+#define ENETC_RXBD_FLAG_TSTMP	BIT(10)
+
+#define ENETC_MAC_ADDR_FILT_CNT	8 /* # of supported entries per port */
+#define ENETC_MAC_ADDR_FILT_RES	3 /* # of reserved entries at the beginning */
+#define ENETC_MAX_NUM_VFS	2
+
+struct enetc_cbd {
+	union {
+		struct {
+			__le32 addr[2];
+			__le32 opt[4];
+		};
+		__le32 data[6];
+	};
+	__le16 index;
+	__le16 length;
+	u8 cmd;
+	u8 cls;
+	u8 _res;
+	u8 status_flags;
+};
+
+#define ENETC_CBD_FLAGS_SF	BIT(7) /* short format */
+#define ENETC_CBD_STATUS_MASK	0xf
+
+static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+{
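+	/* __raw_ accessors skip byte-order conversion, preserving the
+	 * register byte layout of the address
+	 */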
+	*(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
+	*(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+}
+
+#define ENETC_SI_INT_IDX	0
+/* base index for Rx/Tx interrupts */
+#define ENETC_BDR_INT_BASE_IDX	1
+
+/* Common H/W utility functions */
+
+static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
+				       bool en)
+{
+	u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+
+	val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
+	enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+}
+
+static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
+				       bool en)
+{
+	u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+
+	val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
+	enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
new file mode 100644
index 0000000000000000000000000000000000000000..596e0a35339a8a8ef723d2b0a7e9e8cb93c2f334
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include "enetc_pf.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+			  __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC PF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
+{
+	u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
+	u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
+
+	*(u32 *)addr = upper;
+	*(u16 *)(addr + 4) = lower;
+}
+
+static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
+					  const u8 *addr)
+{
+	u32 upper = *(const u32 *)addr;
+	u16 lower = *(const u16 *)(addr + 4);
+
+	__raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
+	__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
+}
+
+static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+	enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+
+	return 0;
+}
+
+static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
+{
+	u32 val = enetc_port_rd(hw, ENETC_PSIPVMR);
+
+	val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL);
+	enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
+}
+
+static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
+{
+	return pf->vlan_promisc_simap & BIT(si_idx);
+}
+
+static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
+{
+	return !bitmap_empty(pf->active_vlans, VLAN_N_VID);
+}
+
+static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+	pf->vlan_promisc_simap |= BIT(si_idx);
+	enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+	pf->vlan_promisc_simap &= ~BIT(si_idx);
+	enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
+{
+	u32 val = 0;
+
+	if (vlan)
+		val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan;
+
+	enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
+}
+
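+/* fold the 48-bit MAC address into a 6-bit hash table index: bit i of
+ * the index is the parity (XOR) of every 6th bit of the byte-reversed
+ * address, starting with bit i
+ */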
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+	u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+	u64 mask = 0;
+	int res = 0;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		mask |= BIT_ULL(i * 6);
+
+	for (i = 0; i < 6; i++)
+		res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+	return res;
+}
+
+static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+	filter->mac_addr_cnt = 0;
+
+	bitmap_zero(filter->mac_hash_table,
+		    ENETC_MADDR_HASH_TBL_SZ);
+}
+
+static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
+					 const unsigned char *addr)
+{
+	/* add exact match addr */
+	ether_addr_copy(filter->mac_addr, addr);
+	filter->mac_addr_cnt++;
+}
+
+static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+					 const unsigned char *addr)
+{
+	int idx = enetc_mac_addr_hash_idx(addr);
+
+	/* add hash table entry */
+	__set_bit(idx, filter->mac_hash_table);
+	filter->mac_addr_cnt++;
+}
+
+static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
+{
+	bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+	if (type == UC) {
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0);
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0);
+	} else { /* MC */
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0);
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0);
+	}
+}
+
+static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
+				 u32 *hash)
+{
+	bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+	if (type == UC) {
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash);
+		enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1));
+	} else { /* MC */
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash);
+		enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1));
+	}
+}
+
+static void enetc_sync_mac_filters(struct enetc_pf *pf)
+{
+	struct enetc_mac_filter *f = pf->mac_filter;
+	struct enetc_si *si = pf->si;
+	int i, pos;
+
+	pos = ENETC_MAC_ADDR_FILT_RES;
+
+	for (i = 0; i < MADDR_TYPE; i++, f++) {
+		bool em = (f->mac_addr_cnt == 1) && (i == UC);
+		bool clear = !f->mac_addr_cnt;
+
+		if (clear) {
+			if (i == UC)
+				enetc_clear_mac_flt_entry(si, pos);
+
+			enetc_clear_mac_ht_flt(si, 0, i);
+			continue;
+		}
+
+		/* exact match filter */
+		if (em) {
+			int err;
+
+			enetc_clear_mac_ht_flt(si, 0, UC);
+
+			err = enetc_set_mac_flt_entry(si, pos, f->mac_addr,
+						      BIT(0));
+			if (!err)
+				continue;
+
+			/* fallback to HT filtering */
+			dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n",
+				 err);
+		}
+
+		/* hash table filter, clear EM filter for UC entries */
+		if (i == UC)
+			enetc_clear_mac_flt_entry(si, pos);
+
+		enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table);
+	}
+}
+
+static void enetc_pf_set_rx_mode(struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	struct enetc_hw *hw = &priv->si->hw;
+	bool uprom = false, mprom = false;
+	struct enetc_mac_filter *filter;
+	struct netdev_hw_addr *ha;
+	u32 psipmr = 0;
+	bool em;
+
+	if (ndev->flags & IFF_PROMISC) {
+		/* enable promisc mode for SI0 (PF) */
+		psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+		uprom = true;
+		mprom = true;
+		/* enable VLAN promisc mode for SI0 */
+		if (!enetc_si_vlan_promisc_is_on(pf, 0))
+			enetc_enable_si_vlan_promisc(pf, 0);
+
+	} else if (ndev->flags & IFF_ALLMULTI) {
+		/* enable multicast promisc mode for SI0 (PF) */
+		psipmr = ENETC_PSIPMR_SET_MP(0);
+		mprom = true;
+	}
+
+	/* first 2 filter entries belong to PF */
+	if (!uprom) {
+		/* Update unicast filters */
+		filter = &pf->mac_filter[UC];
+		enetc_reset_mac_addr_filter(filter);
+
+		em = (netdev_uc_count(ndev) == 1);
+		netdev_for_each_uc_addr(ha, ndev) {
+			if (em) {
+				enetc_add_mac_addr_em_filter(filter, ha->addr);
+				break;
+			}
+
+			enetc_add_mac_addr_ht_filter(filter, ha->addr);
+		}
+	}
+
+	if (!mprom) {
+		/* Update multicast filters */
+		filter = &pf->mac_filter[MC];
+		enetc_reset_mac_addr_filter(filter);
+
+		netdev_for_each_mc_addr(ha, ndev) {
+			if (!is_multicast_ether_addr(ha->addr))
+				continue;
+
+			enetc_add_mac_addr_ht_filter(filter, ha->addr);
+		}
+	}
+
+	if (!uprom || !mprom)
+		/* update PF entries */
+		enetc_sync_mac_filters(pf);
+
+	psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) &
+		  ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0));
+	enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
+}
+
+static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
+				     u32 *hash)
+{
+	enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash);
+	enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1));
+}
+
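+/* fold the 12-bit VLAN ID into a 6-bit hash index: bit i of the index
+ * is the parity of VID bits i and i + 6
+ */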
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+	int res = 0;
+	int i;
+
+	for (i = 0; i < 6; i++)
+		res |= (hweight16(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+	return res;
+}
+
+static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
+{
+	int i;
+
+	if (rehash) {
+		bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+
+		for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+			int hidx = enetc_vid_hash_idx(i);
+
+			__set_bit(hidx, pf->vlan_ht_filter);
+		}
+	}
+
+	enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter);
+}
+
+static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	int idx;
+
+	if (enetc_si_vlan_promisc_is_on(pf, 0))
+		enetc_disable_si_vlan_promisc(pf, 0);
+
+	__set_bit(vid, pf->active_vlans);
+
+	idx = enetc_vid_hash_idx(vid);
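+	/* update the HW filter only when this hash bucket turns on */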
+	if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+		enetc_sync_vlan_ht_filter(pf, false);
+
+	return 0;
+}
+
+static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+	__clear_bit(vid, pf->active_vlans);
+	enetc_sync_vlan_ht_filter(pf, true);
+
+	if (!enetc_vlan_filter_is_on(pf))
+		enetc_enable_si_vlan_promisc(pf, 0);
+
+	return 0;
+}
+
+static void enetc_set_loopback(struct net_device *ndev, bool en)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_hw *hw = &priv->si->hw;
+	u32 reg;
+
+	reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+	if (reg & ENETC_PMO_IFM_RG) {
+		/* RGMII mode */
+		reg = (reg & ~ENETC_PM0_IFM_RLP) |
+		      (en ? ENETC_PM0_IFM_RLP : 0);
+		enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+	} else {
+		/* assume SGMII mode */
+		reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+		reg = (reg & ~ENETC_PM0_CMD_XGLP) |
+		      (en ? ENETC_PM0_CMD_XGLP : 0);
+		reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
+		      (en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
+		enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
+		enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+	}
+}
+
+static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+	if (vf >= pf->total_vfs)
+		return -EINVAL;
+
+	if (!is_valid_ether_addr(mac))
+		return -EADDRNOTAVAIL;
+
+	enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
+	return 0;
+}
+
+static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
+				u8 qos, __be16 proto)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+	if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
+		return -EOPNOTSUPP;
+
+	if (vf >= pf->total_vfs)
+		return -EINVAL;
+
+	if (proto != htons(ETH_P_8021Q))
+		/* only C-tags supported for now */
+		return -EPROTONOSUPPORT;
+
+	enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
+	return 0;
+}
+
+static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_pf *pf = enetc_si_priv(priv->si);
+	u32 cfgr;
+
+	if (vf >= pf->total_vfs)
+		return -EINVAL;
+
+	cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
+	cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
+	enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);
+
+	return 0;
+}
+
+static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+{
+	unsigned char mac_addr[MAX_ADDR_LEN];
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_hw *hw = &si->hw;
+	int i;
+
+	/* check the PF's and all VFs' MAC addresses; set a random one if zero */
+	for (i = 0; i < pf->total_vfs + 1; i++) {
+		enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
+		if (!is_zero_ether_addr(mac_addr))
+			continue;
+		eth_random_addr(mac_addr);
+		dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
+			 i, mac_addr);
+		enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+	}
+}
+
+static void enetc_port_si_configure(struct enetc_si *si)
+{
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_hw *hw = &si->hw;
+	int num_rings, i;
+	u32 val;
+
+	val = enetc_port_rd(hw, ENETC_PCAPR0);
+	num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
+
+	val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
+	val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);
+
+	if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
+		val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
+		val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+
+		dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
+			 num_rings, ENETC_PF_NUM_RINGS);
+
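+		/* leave no rings configured for the VF SIs */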
+		num_rings = 0;
+	}
+
+	/* Add default one-time settings for SI0 (PF) */
+	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+	enetc_port_wr(hw, ENETC_PSICFGR0(0), val);
+
+	if (num_rings)
+		num_rings -= ENETC_PF_NUM_RINGS;
+
+	/* Configure the SIs for each available VF */
+	val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+	val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+	if (num_rings) {
+		num_rings /= pf->total_vfs;
+		val |= ENETC_PSICFGR0_SET_TXBDR(num_rings);
+		val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+	}
+
+	for (i = 0; i < pf->total_vfs; i++)
+		enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val);
+
+	/* Port level VLAN settings */
+	val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+	enetc_port_wr(hw, ENETC_PVCLCTR, val);
+	/* use outer tag for VLAN filtering */
+	enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+	enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+		      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+	enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+	enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+	enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+		      ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
+		      ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+
+	enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+		      ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
+		      ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+	/* set auto-speed for RGMII */
+	if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG)
+		enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
+	if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII)
+		enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
+}
+
+static void enetc_configure_port_pmac(struct enetc_hw *hw)
+{
+	u32 temp;
+
+	/* Set pMAC step lock */
+	temp = enetc_port_rd(hw, ENETC_PFPMR);
+	enetc_port_wr(hw, ENETC_PFPMR,
+		      temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
+
+	temp = enetc_port_rd(hw, ENETC_MMCSR);
+	enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+}
+
+static void enetc_configure_port(struct enetc_pf *pf)
+{
+	struct enetc_hw *hw = &pf->si->hw;
+
+	enetc_configure_port_pmac(hw);
+
+	enetc_configure_port_mac(hw);
+
+	enetc_port_si_configure(pf->si);
+
+	/* fix-up primary MAC addresses, if not set already */
+	enetc_port_setup_primary_mac_address(pf->si);
+
+	/* enforce VLAN promisc mode for all SIs */
+	pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
+	enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
+
+	enetc_port_wr(hw, ENETC_PSIPMR, 0);
+
+	/* enable port */
+	enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN);
+}
+
+#ifdef CONFIG_PCI_IOV
+static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_pf *pf = enetc_si_priv(si);
+	int err;
+
+	if (!num_vfs) {
+		pf->num_vfs = 0;
+		pci_disable_sriov(pdev);
+	} else {
+		pf->num_vfs = num_vfs;
+
+		err = pci_enable_sriov(pdev, num_vfs);
+		if (err) {
+			dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err);
+			goto err_en_sriov;
+		}
+	}
+
+	return num_vfs;
+
+err_en_sriov:
+	pf->num_vfs = 0;
+
+	return err;
+}
+#else
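+/* SR-IOV support compiled out: turn the call site in enetc_pf_remove()
+ * into a no-op
+ */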
+#define enetc_sriov_configure(pdev, num_vfs)	(void)0
+#endif
+
+static int enetc_pf_set_features(struct net_device *ndev,
+				 netdev_features_t features)
+{
+	netdev_features_t changed = ndev->features ^ features;
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		enetc_enable_rxvlan(&priv->si->hw, 0,
+				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+		enetc_enable_txvlan(&priv->si->hw, 0,
+				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
+	if (changed & NETIF_F_LOOPBACK)
+		enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+	return 0;
+}
+
+static const struct net_device_ops enetc_ndev_ops = {
+	.ndo_open		= enetc_open,
+	.ndo_stop		= enetc_close,
+	.ndo_start_xmit		= enetc_xmit,
+	.ndo_get_stats		= enetc_get_stats,
+	.ndo_set_mac_address	= enetc_pf_set_mac_addr,
+	.ndo_set_rx_mode	= enetc_pf_set_rx_mode,
+	.ndo_vlan_rx_add_vid	= enetc_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= enetc_vlan_rx_del_vid,
+	.ndo_set_vf_mac		= enetc_pf_set_vf_mac,
+	.ndo_set_vf_vlan	= enetc_pf_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= enetc_pf_set_vf_spoofchk,
+	.ndo_set_features	= enetc_pf_set_features,
+};
+
+static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+				  const struct net_device_ops *ndev_ops)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	SET_NETDEV_DEV(ndev, &si->pdev->dev);
+	priv->ndev = ndev;
+	priv->si = si;
+	priv->dev = &si->pdev->dev;
+	si->ndev = ndev;
+
+	priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+	ndev->netdev_ops = ndev_ops;
+	enetc_set_ethtool_ops(ndev);
+	ndev->watchdog_timeo = 5 * HZ;
+	ndev->max_mtu = ENETC_MAX_MTU;
+
+	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			    NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_LOOPBACK;
+	ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+			 NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			 NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX |
+			 NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (si->errata & ENETC_ERR_TXCSUM) {
+		ndev->hw_features &= ~NETIF_F_HW_CSUM;
+		ndev->features &= ~NETIF_F_HW_CSUM;
+	}
+
+	ndev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* pick up primary MAC address from SI */
+	enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
+{
+	struct device_node *np = priv->dev->of_node;
+	int err;
+
+	if (!np) {
+		dev_err(priv->dev, "missing ENETC port node\n");
+		return -ENODEV;
+	}
+
+	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+	if (!priv->phy_node) {
+		if (!of_phy_is_fixed_link(np)) {
+			dev_err(priv->dev, "PHY not specified\n");
+			return -ENODEV;
+		}
+
+		err = of_phy_register_fixed_link(np);
+		if (err < 0) {
+			dev_err(priv->dev, "fixed link registration failed\n");
+			return err;
+		}
+
+		priv->phy_node = of_node_get(np);
+	}
+
+	priv->if_mode = of_get_phy_mode(np);
+	if (priv->if_mode < 0) {
+		dev_err(priv->dev, "missing phy type\n");
+		of_node_put(priv->phy_node);
+		if (of_phy_is_fixed_link(np))
+			of_phy_deregister_fixed_link(np);
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void enetc_of_put_phy(struct enetc_ndev_priv *priv)
+{
+	struct device_node *np = priv->dev->of_node;
+
+	if (np && of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	if (priv->phy_node)
+		of_node_put(priv->phy_node);
+}
+
+static int enetc_pf_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *ent)
+{
+	struct enetc_ndev_priv *priv;
+	struct net_device *ndev;
+	struct enetc_si *si;
+	struct enetc_pf *pf;
+	int err;
+
+	if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+		dev_info(&pdev->dev, "device is disabled, skipping\n");
+		return -ENODEV;
+	}
+
+	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+	if (err) {
+		dev_err(&pdev->dev, "PCI probing failed\n");
+		return err;
+	}
+
+	si = pci_get_drvdata(pdev);
+	if (!si->hw.port || !si->hw.global) {
+		err = -ENODEV;
+		dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
+		goto err_map_pf_space;
+	}
+
+	pf = enetc_si_priv(si);
+	pf->si = si;
+	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+
+	enetc_configure_port(pf);
+
+	enetc_get_si_caps(si);
+
+	ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+	if (!ndev) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "netdev creation failed\n");
+		goto err_alloc_netdev;
+	}
+
+	enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+	priv = netdev_priv(ndev);
+
+	enetc_init_si_rings_params(priv);
+
+	err = enetc_alloc_si_resources(priv);
+	if (err) {
+		dev_err(&pdev->dev, "SI resource alloc failed\n");
+		goto err_alloc_si_res;
+	}
+
+	err = enetc_alloc_msix(priv);
+	if (err) {
+		dev_err(&pdev->dev, "MSIX alloc failed\n");
+		goto err_alloc_msix;
+	}
+
+	err = enetc_of_get_phy(priv);
+	if (err)
+		dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
+
+	err = register_netdev(ndev);
+	if (err)
+		goto err_reg_netdev;
+
+	netif_carrier_off(ndev);
+
+	netif_info(priv, probe, ndev, "%s v%s\n",
+		   enetc_drv_name, enetc_drv_ver);
+
+	return 0;
+
+err_reg_netdev:
+	enetc_of_put_phy(priv);
+	enetc_free_msix(priv);
+err_alloc_msix:
+	enetc_free_si_resources(priv);
+err_alloc_si_res:
+	si->ndev = NULL;
+	free_netdev(ndev);
+err_alloc_netdev:
+err_map_pf_space:
+	enetc_pci_remove(pdev);
+
+	return err;
+}
+
+static void enetc_pf_remove(struct pci_dev *pdev)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_pf *pf = enetc_si_priv(si);
+	struct enetc_ndev_priv *priv;
+
+	if (pf->num_vfs)
+		enetc_sriov_configure(pdev, 0);
+
+	priv = netdev_priv(si->ndev);
+	netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+		   enetc_drv_name, enetc_drv_ver);
+
+	unregister_netdev(si->ndev);
+
+	enetc_of_put_phy(priv);
+
+	enetc_free_msix(priv);
+
+	enetc_free_si_resources(priv);
+
+	free_netdev(si->ndev);
+
+	enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_pf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
+	{ 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pf_id_table);
+
+static struct pci_driver enetc_pf_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = enetc_pf_id_table,
+	.probe = enetc_pf_probe,
+	.remove = enetc_pf_remove,
+#ifdef CONFIG_PCI_IOV
+	.sriov_configure = enetc_sriov_configure,
+#endif
+};
+module_pci_driver(enetc_pf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
new file mode 100644
index 0000000000000000000000000000000000000000..a990eb6a85f77ddc781f736d8fae368ee5e29cea
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+#define ENETC_PF_NUM_RINGS	8
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+#define ENETC_MAX_NUM_MAC_FLT	((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
+
+#define ENETC_MADDR_HASH_TBL_SZ	64
+struct enetc_mac_filter {
+	union {
+		char mac_addr[ETH_ALEN];
+		DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+	};
+	int mac_addr_cnt;
+};
+
+#define ENETC_VLAN_HT_SIZE	64
+
+struct enetc_pf {
+	struct enetc_si *si;
+	int num_vfs; /* number of active VFs, after sriov_init */
+	int total_vfs; /* max number of VFs, set for PF at probe */
+
+	struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT];
+
+	char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */
+	DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
+};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
new file mode 100644
index 0000000000000000000000000000000000000000..5f5103fa7891516e4fbe8a3858f596fd992af5e0
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include "enetc.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+			  __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC VF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+/* Probing / Init */
+static const struct net_device_ops enetc_ndev_ops = {
+	.ndo_open		= enetc_open,
+	.ndo_stop		= enetc_close,
+	.ndo_start_xmit		= enetc_xmit,
+	.ndo_get_stats		= enetc_get_stats,
+};
+
+static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+				  const struct net_device_ops *ndev_ops)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+	SET_NETDEV_DEV(ndev, &si->pdev->dev);
+	priv->ndev = ndev;
+	priv->si = si;
+	priv->dev = &si->pdev->dev;
+	si->ndev = ndev;
+
+	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+	ndev->netdev_ops = ndev_ops;
+	enetc_set_ethtool_ops(ndev);
+	ndev->watchdog_timeo = 5 * HZ;
+	ndev->max_mtu = ENETC_MAX_MTU;
+
+	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			    NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_CTAG_RX;
+	ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+			 NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+			 NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (si->errata & ENETC_ERR_TXCSUM) {
+		ndev->hw_features &= ~NETIF_F_HW_CSUM;
+		ndev->features &= ~NETIF_F_HW_CSUM;
+	}
+
+	/* pick up primary MAC address from SI */
+	enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_vf_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *ent)
+{
+	struct enetc_ndev_priv *priv;
+	struct net_device *ndev;
+	struct enetc_si *si;
+	int err;
+
+	err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
+	if (err) {
+		dev_err(&pdev->dev, "PCI probing failed\n");
+		return err;
+	}
+
+	si = pci_get_drvdata(pdev);
+
+	enetc_get_si_caps(si);
+
+	ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+	if (!ndev) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "netdev creation failed\n");
+		goto err_alloc_netdev;
+	}
+
+	enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+	priv = netdev_priv(ndev);
+
+	enetc_init_si_rings_params(priv);
+
+	err = enetc_alloc_si_resources(priv);
+	if (err) {
+		dev_err(&pdev->dev, "SI resource alloc failed\n");
+		goto err_alloc_si_res;
+	}
+
+	err = enetc_alloc_msix(priv);
+	if (err) {
+		dev_err(&pdev->dev, "MSIX alloc failed\n");
+		goto err_alloc_msix;
+	}
+
+	err = register_netdev(ndev);
+	if (err)
+		goto err_reg_netdev;
+
+	netif_carrier_off(ndev);
+
+	netif_info(priv, probe, ndev, "%s v%s\n",
+		   enetc_drv_name, enetc_drv_ver);
+
+	return 0;
+
+err_reg_netdev:
+	enetc_free_msix(priv);
+err_alloc_msix:
+	enetc_free_si_resources(priv);
+err_alloc_si_res:
+	si->ndev = NULL;
+	free_netdev(ndev);
+err_alloc_netdev:
+	enetc_pci_remove(pdev);
+
+	return err;
+}
+
+static void enetc_vf_remove(struct pci_dev *pdev)
+{
+	struct enetc_si *si = pci_get_drvdata(pdev);
+	struct enetc_ndev_priv *priv;
+
+	priv = netdev_priv(si->ndev);
+	netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+		   enetc_drv_name, enetc_drv_ver);
+	unregister_netdev(si->ndev);
+
+	enetc_free_msix(priv);
+
+	enetc_free_si_resources(priv);
+
+	free_netdev(si->ndev);
+
+	enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_vf_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+	{ 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_vf_id_table);
+
+static struct pci_driver enetc_vf_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = enetc_vf_id_table,
+	.probe = enetc_vf_probe,
+	.remove = enetc_vf_remove,
+};
+module_pci_driver(enetc_vf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);