openwrtv3/target/linux/lantiq/patches-3.14/0018-MTD-nand-lots-of-xrx200-fixes.patch

From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 9 Sep 2014 23:12:15 +0200
Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes

Signed-off-by: John Crispin <blogic@openwrt.org>
---
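
The core of the workaround is arbiter manipulation: the req_mask value is
shifted into bits 16 and up of the Danube PCI arbiter control register
(PCI_CR_PC_ARB), masking the corresponding external PCI REQ pins for as long
as the NAND chip is selected. Below is a minimal user-space model of the
mask/unmask pairing from xway_select_chip(); the demo_* names and the plain
variable standing in for the memory-mapped register are invented for the
sketch:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the memory-mapped arbiter register at PCI_CR_PC_ARB;
 * the driver reaches it through DANUBE_PCI_REG32(). */
static uint32_t demo_pc_arb;

/* Gate the external PCI REQ lines named in req_mask (NAND selected). */
static void demo_mask_req(uint32_t req_mask)
{
	demo_pc_arb |= (req_mask << 16);
}

/* Release them again (NAND deselected). */
static void demo_unmask_req(uint32_t req_mask)
{
	demo_pc_arb &= ~(req_mask << 16);
}

int main(void)
{
	uint32_t req_mask = 0x1;	/* e.g. mask REQ0 only */

	demo_mask_req(req_mask);
	printf("during nand access: 0x%08x\n", demo_pc_arb);
	demo_unmask_req(req_mask);
	printf("after nand access:  0x%08x\n", demo_pc_arb);
	return 0;
}

The same pairing explains why the patch moves the ebu locking out to the
chip-select path: the req_mask comment in the diff notes that the whole
masked operation is meant to run with interrupts disabled, so both the mask
and the unmask happen under ebu_lock.
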
 drivers/mtd/nand/xway_nand.c | 63 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)
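
On the device-tree side a board opts in by giving its NAND node a req-mask
property; when the property is absent the probe falls back to 0 and masking
stays disabled. The fragment and helper below are a hypothetical
illustration (node layout modeled on typical lantiq boards,
demo_read_req_mask() is an invented name), built around the same
of_get_property() call the patch adds to xway_nand_probe():

#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical device-tree fragment (node layout modeled on typical
 * lantiq boards, not mandated by this patch); req-mask = <0x1> would
 * gate PCI REQ0 while the NAND is active:
 *
 *	nand-parts@0 {
 *		compatible = "gen_nand", "lantiq,nand-xway";
 *		lantiq,cs = <1>;
 *		req-mask = <0x1>;
 *	};
 *
 * The probe side then picks the value up much as the patch does: */
static u32 demo_read_req_mask(struct platform_device *pdev)
{
	const __be32 *prop = of_get_property(pdev->dev.of_node,
					     "req-mask", NULL);

	/* be32_to_cpup() is the portable spelling; the patch keeps the
	 * raw __be32, which comes to the same thing on big-endian MIPS. */
	return prop ? be32_to_cpup(prop) : 0;
}
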
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index e430f2d..fedf2c4 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -54,8 +54,27 @@
 #define NAND_CON_CSMUX		(1 << 1)
 #define NAND_CON_NANDM		1
 
+#define DANUBE_PCI_REG32( addr )	(*(volatile u32 *)(addr))
+#define PCI_CR_PR_OFFSET	(KSEG1+0x1E105400)
+#define PCI_CR_PC_ARB		(PCI_CR_PR_OFFSET + 0x0080)
+
 static u32 xway_latchcmd;
+/*
+ * req_mask provides a mechanism to prevent interference between
+ * nand and pci (probably only relevant for the BT Home Hub 2B).
+ * Setting it causes the corresponding pci req pins to be masked
+ * during nand access, and also moves ebu locking from the read/write
+ * functions to the chip select function to ensure that the whole
+ * operation runs with interrupts disabled.
+ * In addition it switches on some extra waiting in xway_cmd_ctrl().
+ * This seems to be necessary if the ebu_cs1 pin has open-drain disabled,
+ * which in turn seems to be necessary for the nor chip to be recognised
+ * reliably, on a board (Home Hub 2B again) which has both nor and nand.
+ */
+
+static __be32 req_mask = 0;
+
 
 static void xway_reset_chip(struct nand_chip *chip)
 {
 	unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
@@ -86,12 +105,24 @@ static void xway_select_chip(struct mtd_info *mtd, int chip)
 	case -1:
 		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
 		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+
+		if (req_mask) {
+			/* Unmask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
+		}
 		spin_unlock_irqrestore(&ebu_lock, csflags);
+
 		break;
 	case 0:
 		spin_lock_irqsave(&ebu_lock, csflags);
+		if (req_mask) {
+			/* Mask all external PCI request */
+			DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
+		}
+
 		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
 		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+
 		break;
 	default:
 		BUG();
@@ -103,6 +134,12 @@ static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 	struct nand_chip *this = mtd->priv;
 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
 
+	if (req_mask) {
+		if (cmd != NAND_CMD_STATUS)
+			ltq_ebu_w32(EBU_NAND_WAIT, 0); /* Clear nand ready */
+	}
+
+
 	if (ctrl & NAND_CTRL_CHANGE) {
 		if (ctrl & NAND_CLE)
 			xway_latchcmd = NAND_WRITE_CMD;
@@ -115,6 +152,24 @@ static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
 			;
 	}
+
+	if (req_mask) {
+		/*
+		 * program and erase have their own busy handlers
+		 * status and sequential in needs no delay
+		 */
+		switch (cmd) {
+		case NAND_CMD_ERASE1:
+		case NAND_CMD_SEQIN:
+		case NAND_CMD_STATUS:
+		case NAND_CMD_READID:
+			return;
+		}
+
+		/* wait until command is processed */
+		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
+			;
+	}
 }
 
 static int xway_dev_ready(struct mtd_info *mtd)
@@ -157,6 +212,8 @@ static int xway_nand_probe(struct platform_device *pdev)
 {
 	struct nand_chip *this = platform_get_drvdata(pdev);
 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+	const __be32 *req_mask_ptr = of_get_property(pdev->dev.of_node,
+						"req-mask", NULL);
 	const __be32 *cs = of_get_property(pdev->dev.of_node,
 			"lantiq,cs", NULL);
 	u32 cs_flag = 0;
@@ -165,6 +222,12 @@ static int xway_nand_probe(struct platform_device *pdev)
 	if (cs && (*cs == 1))
 		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
 
+	/*
+	 * Load the PCI req lines to mask from the device tree. If the
+	 * property is not present, setting req_mask to 0 disables masking.
+	 */
+	req_mask = (req_mask_ptr ? *req_mask_ptr : 0);
+
 	/* setup the EBU to run in NAND mode on our base addr */
 	ltq_ebu_w32(CPHYSADDR(nandaddr)
 		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
--
1.7.10.4
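
A footnote on the extra waiting in xway_cmd_ctrl(): with req_mask set, the
ready flag in EBU_NAND_WAIT is cleared before a command is latched and polled
again afterwards, except for commands that bring their own busy handling
(program/erase) or need no delay (status, sequential-in, read-id). The
following is a condensed, compilable model of that gating; the demo_* stubs
stand in for the ltq_ebu_r32()/ltq_ebu_w32() accessors and the NAND_WAIT_RD
bit position is illustrative:

#include <stdint.h>
#include <stdio.h>

#define NAND_CMD_ERASE1		0x60
#define NAND_CMD_STATUS		0x70
#define NAND_CMD_SEQIN		0x80
#define NAND_CMD_READID		0x90

#define NAND_WAIT_RD		0x1	/* illustrative bit position */

/* Fake EBU_NAND_WAIT register; real hardware re-asserts readiness, so
 * the read stub pretends the controller goes ready after a few polls. */
static uint32_t demo_nand_wait = NAND_WAIT_RD;
static int demo_polls;

static uint32_t demo_ebu_read(void)
{
	if (++demo_polls > 3)
		demo_nand_wait |= NAND_WAIT_RD;
	return demo_nand_wait;
}

static void demo_ebu_write(uint32_t val)
{
	demo_nand_wait = val;
}

/* Condensed model of the req_mask-gated waiting in xway_cmd_ctrl(). */
static void demo_cmd_gate(int cmd)
{
	/* Clear the ready flag before latching anything but STATUS,
	 * which is itself issued while polling for readiness. */
	if (cmd != NAND_CMD_STATUS)
		demo_ebu_write(0);

	/* ... command latch and write-complete wait elided ... */

	/* Program and erase have their own busy handlers; status,
	 * sequential-in and read-id need no delay here. */
	switch (cmd) {
	case NAND_CMD_ERASE1:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
		return;
	}

	/* Wait until the command is processed. */
	while ((demo_ebu_read() & NAND_WAIT_RD) == 0)
		;
}

int main(void)
{
	demo_cmd_gate(NAND_CMD_STATUS);	/* returns immediately */
	demo_cmd_gate(0x00);		/* READ0: cleared, then polled */
	printf("polled %d time(s) before ready\n", demo_polls);
	return 0;
}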