From: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: Thu, 4 Feb 2016 22:09:28 +0100
Subject: [PATCH] net: mvneta: The mvneta_percpu_elect function should be
 atomic

Electing a CPU must be done atomically: it should happen either before
or after the removal/insertion of a CPU, and this function is not
reentrant.

During the loop of mvneta_percpu_elect we associate the queues with the
CPUs; if the topology changes during this loop, the mapping between the
CPUs and the queues could end up wrong. The loop also updates the
interrupt mask for each CPU, and that mask must not be changed at the
same time by another part of the driver.

This patch adds a spinlock to create the needed critical sections.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -370,6 +370,10 @@ struct mvneta_port {
 	struct net_device *dev;
 	struct notifier_block cpu_notifier;
 	int rxq_def;
+	/* Protect the access to the percpu interrupt registers,
+	 * ensuring that the configuration remains coherent.
+	 */
+	spinlock_t lock;
 
 	/* Core clock */
 	struct clk *clk;
@@ -2853,6 +2857,12 @@ static void mvneta_percpu_elect(struct m
 {
 	int elected_cpu = 0, max_cpu, cpu, i = 0;
 
+	/* Electing a CPU must be done in an atomic way: it should be
+	 * done after or before the removal/insertion of a CPU and
+	 * this function is not reentrant.
+	 */
+	spin_lock(&pp->lock);
+
 	/* Use the cpu associated to the rxq when it is online, in all
 	 * the other cases, use the cpu 0 which can't be offline.
 	 */
@@ -2896,6 +2906,7 @@ static void mvneta_percpu_elect(struct m
 		i++;
 
 	}
+	spin_unlock(&pp->lock);
 };
 
 static int mvneta_percpu_notifier(struct notifier_block *nfb,
@@ -2950,8 +2961,13 @@ static int mvneta_percpu_notifier(struct
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		netif_tx_stop_all_queues(pp->dev);
+		/* Thanks to this lock we are sure that any pending
+		 * cpu election is done
+		 */
+		spin_lock(&pp->lock);
 		/* Mask all ethernet port interrupts */
 		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+		spin_unlock(&pp->lock);
 
 		napi_synchronize(&port->napi);
 		napi_disable(&port->napi);
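
Note (not part of the patch): to see the locking scheme in isolation, the sketch below is a minimal userspace analogue of the pattern the patch introduces, not mvneta code. A pthread mutex stands in for the kernel spinlock_t, fake_percpu_elect for mvneta_percpu_elect, and fake_cpu_down_prepare for the CPU_DOWN_PREPARE branch of the hotplug notifier; the fake_port structure, all function names and the queue-mapping arithmetic are invented for illustration. Build with gcc -pthread.

/*
 * Userspace analogue of the locking scheme above, illustration only:
 * one lock serializes the "election" loop (which rewrites the whole
 * CPU/queue mapping) against a simulated hotplug path (which masks
 * interrupts).  All names here are invented for this sketch.
 */
#include <pthread.h>
#include <stdio.h>

#define NQUEUES 8

struct fake_port {
	pthread_mutex_t lock;      /* plays the role of pp->lock    */
	int queue_to_cpu[NQUEUES]; /* plays the role of the rxq map */
	int irq_masked;
};

/* "Elect" a CPU for every queue; the whole loop is one critical
 * section so a concurrent hotplug event cannot see a half-updated map.
 */
static void fake_percpu_elect(struct fake_port *pp, int online_cpus)
{
	pthread_mutex_lock(&pp->lock);
	for (int q = 0; q < NQUEUES; q++)
		pp->queue_to_cpu[q] = q % online_cpus;
	pthread_mutex_unlock(&pp->lock);
}

/* Simulated CPU_DOWN_PREPARE path: taking the lock guarantees that any
 * election in progress has finished before interrupts are masked.
 */
static void *fake_cpu_down_prepare(void *arg)
{
	struct fake_port *pp = arg;

	pthread_mutex_lock(&pp->lock);
	pp->irq_masked = 1;
	pthread_mutex_unlock(&pp->lock);
	return NULL;
}

int main(void)
{
	struct fake_port pp = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t hotplug;

	/* Run the hotplug path concurrently with an election; the lock
	 * serializes them, which is what the patch relies on.
	 */
	pthread_create(&hotplug, NULL, fake_cpu_down_prepare, &pp);
	fake_percpu_elect(&pp, 4);
	pthread_join(hotplug, NULL);

	fake_percpu_elect(&pp, 3); /* re-elect with one CPU gone */
	for (int q = 0; q < NQUEUES; q++)
		printf("queue %d -> cpu %d\n", q, pp.queue_to_cpu[q]);
	return 0;
}

In the driver the same shape holds: mvneta_percpu_elect takes pp->lock around its whole loop, and the notifier takes it before masking the per-CPU interrupts, so neither path can observe the other mid-update.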