lantiq: fix dma locking problems with SMP
The DMA controller used only local locks, which do not protect the critical section against other CPUs. Fix this by using spinlocks.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
parent 828a471e8d
commit 2dac67464d
1 changed file with 152 additions and 0 deletions
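Before the diff itself, a brief editor's sketch of the race the commit message describes. It is not part of the commit; the register names and accessors (LTQ_DMA_CS, LTQ_DMA_IRNEN, ltq_dma_w32, ltq_dma_w32_mask) are the ones used in arch/mips/lantiq/xway/dma.c, while the function name is made up for illustration:

static void broken_on_smp(int nr)	/* hypothetical, mirrors the old code */
{
	unsigned long flags;

	local_irq_save(flags);		/* masks interrupts on this CPU only */
	ltq_dma_w32(nr, LTQ_DMA_CS);	/* CPU0 selects channel nr */
	/* nothing stops CPU1 from writing LTQ_DMA_CS right here ... */
	ltq_dma_w32_mask(0, 1 << nr, LTQ_DMA_IRNEN);	/* ... so CPU0 may configure the wrong channel */
	local_irq_restore(flags);
}

A spinlock closes that window: only one CPU at a time can sit between the channel select and the register update.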
@@ -0,0 +1,152 @@
From 58078a30038b578c26c532545448fe3746648390 Mon Sep 17 00:00:00 2001
From: Hauke Mehrtens <hauke@hauke-m.de>
Date: Thu, 29 Dec 2016 21:02:57 +0100
Subject: [PATCH] MIPS: lantiq: lock DMA register accesses for SMP

The DMA controller channel and port configuration is changed by
selecting the port or channel in one register and then update the
configuration in other registers. This has to be done in an atomic
operation. Previously only the local interrupts were deactivated which
works for single CPU systems. If the system supports SMP a better
locking is needed, use spinlocks instead.
On more recent SoCs (at least xrx200 and later) there are two memory
regions to change the configuration, there we could use one area for
each CPU and do not have to synchronize between the CPUs and more.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
---
 arch/mips/lantiq/xway/dma.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -59,16 +60,17 @@
 			ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
 
@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_chann
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
 
@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
 
@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-	ltq_dma_enable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
 
@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-	ltq_dma_disable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
 
@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
 			      &ch->phys, GFP_ATOMIC);
 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
 		;
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel 
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
 
@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel 
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
 
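One detail the hunks above rely on but do not spell out: spin_lock_irqsave() still disables interrupts on the local CPU, so the old single-core behaviour is preserved, but the new ltq_dma_lock is not recursive. That is why ltq_dma_open() and ltq_dma_close() stop calling ltq_dma_enable_irq()/ltq_dma_disable_irq() (which now take the lock themselves) and instead write LTQ_DMA_IRNEN directly while already holding it. Re-assembled from the hunk above, with explanatory comments added, the patched ltq_dma_open() reads:

void
ltq_dma_open(struct ltq_dma_channel *ch)
{
	unsigned long flag;

	/* Lock out other CPUs and local interrupts for the whole
	 * select-then-configure sequence. */
	spin_lock_irqsave(&ltq_dma_lock, flag);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);			/* select the channel */
	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);	/* switch it on */
	/* Calling ltq_dma_enable_irq(ch) here would try to take
	 * ltq_dma_lock a second time and deadlock, so the IRNEN
	 * update is open-coded instead. */
	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
	spin_unlock_irqrestore(&ltq_dma_lock, flag);
}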