@@ -90,6 +90,7 @@
#define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_WOL 0x00b8 /* Wake on LAN */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -396,6 +397,8 @@
#define MACB_PDRSFT_SIZE 1
#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */
#define MACB_SRI_SIZE 1
+#define GEM_WOL_OFFSET 28 /* Enable Wake-on-LAN interrupt */
+#define GEM_WOL_SIZE 1

/* Timer increment fields */
#define MACB_TI_CNS_OFFSET 0
@@ -1513,6 +1513,38 @@ static void macb_tx_restart(struct macb_queue *queue)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}

+static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
+{
+ struct macb_queue *queue = dev_id;
+ struct macb *bp = queue->bp;
+ u32 status;
+
+ status = queue_readl(queue, ISR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ spin_lock(&bp->lock);
+
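+ /* A wake event was signalled: mask the WoL interrupt, clear the
+ * controller's WoL enable and report a wakeup event to the PM core.
+ */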
+ if (status & GEM_BIT(WOL)) {
+ queue_writel(queue, IDR, GEM_BIT(WOL));
+ gem_writel(bp, WOL, 0);
+ netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, GEM_BIT(WOL));
+ pm_wakeup_event(&bp->pdev->dev, 0);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -3306,6 +3338,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
static const struct ethtool_ops gem_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
+ .get_wol = macb_get_wol,
+ .set_wol = macb_set_wol,
.get_link = ethtool_op_get_link,
.get_ts_info = macb_get_ts_info,
.get_ethtool_stats = gem_get_ethtool_stats,
@@ -4534,20 +4568,56 @@ static int __maybe_unused macb_suspend(struct device *dev)
struct macb_queue *queue = bp->queues;
unsigned long flags;
unsigned int q;
+ int err;

if (!netif_running(netdev))
return 0;

if (bp->wol & MACB_WOL_ENABLED) {
- macb_writel(bp, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
- enable_irq_wake(bp->queues[0].irq);
- netif_device_detach(netdev);
- } else {
- netif_device_detach(netdev);
+ spin_lock_irqsave(&bp->lock, flags);
+ /* Flush all status bits */
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_disable(&queue->napi);
+ ++q, ++queue) {
+ /* Disable all interrupts */
+ queue_writel(queue, IDR, -1);
+ queue_readl(queue, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, -1);
+ }
+ /* Change the interrupt handler and
+ * enable the WoL IRQ on queue 0
+ */
+ if (macb_is_gem(bp)) {
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+ err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+ if (err) {
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
+ queue_writel(bp->queues, IER, GEM_BIT(WOL));
+ gem_writel(bp, WOL, MACB_BIT(MAG));
+ } else {
+ queue_writel(bp->queues, IER, MACB_BIT(WOL));
+ macb_writel(bp, WOL, MACB_BIT(MAG));
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ enable_irq_wake(bp->queues[0].irq);
+ }
+
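+ /* Common to WoL and non-WoL suspend: detach netdev and quiesce NAPI */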
+ netif_device_detach(netdev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_disable(&queue->napi);
+
+ if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
rtnl_unlock();
@@ -4575,7 +4645,9 @@ static int __maybe_unused macb_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
struct macb_queue *queue = bp->queues;
+ unsigned long flags;
unsigned int q;
+ int err;

if (!netif_running(netdev))
return 0;
@@ -4584,29 +4656,62 @@
pm_runtime_force_resume(dev);

if (bp->wol & MACB_WOL_ENABLED) {
- macb_writel(bp, IDR, MACB_BIT(WOL));
- macb_writel(bp, WOL, 0);
+ spin_lock_irqsave(&bp->lock, flags);
+ /* Disable WoL */
+ if (macb_is_gem(bp)) {
+ queue_writel(bp->queues, IDR, GEM_BIT(WOL));
+ gem_writel(bp, WOL, 0);
+ } else {
+ queue_writel(bp->queues, IDR, MACB_BIT(WOL));
+ macb_writel(bp, WOL, 0);
+ }
+ /* Clear ISR on queue 0 */
+ queue_readl(bp->queues, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(bp->queues, ISR, -1);
+ /* Replace interrupt handler on queue 0 */
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+ if (err) {
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
+ /* Re-enable RX, TX and HRESP interrupts on all queues */
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ queue_writel(queue, IER,
+ bp->rx_intr_mask |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ spin_unlock_irqrestore(&bp->lock, flags);
+
disable_irq_wake(bp->queues[0].irq);
- } else {
- macb_writel(bp, NCR, MACB_BIT(MPE));
+ }
+
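+ /* Common resume path: re-enable NAPI on all queues */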
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_enable(&queue->napi);
+ if (!(bp->wol & MACB_WOL_ENABLED)) {
if (netdev->hw_features & NETIF_F_NTUPLE)
gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

- for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_enable(&queue->napi);
+ macb_init_hw(bp);
+ macb_set_rx_mode(netdev);
+ macb_restore_features(bp);
rtnl_lock();
phylink_start(bp->phylink);
rtnl_unlock();
}

- macb_init_hw(bp);
- macb_set_rx_mode(netdev);
- macb_restore_features(bp);
netif_device_attach(netdev);
if (bp->ptp_info)
bp->ptp_info->ptp_init(netdev);