f5f36911f0
SVN-Revision: 32925
7636 lines
205 KiB
Diff
7636 lines
205 KiB
Diff
Index: linux-3.3.8/arch/mips/lantiq/Kconfig
|
|
===================================================================
|
|
--- linux-3.3.8.orig/arch/mips/lantiq/Kconfig 2012-07-31 19:51:33.349105884 +0200
|
|
+++ linux-3.3.8/arch/mips/lantiq/Kconfig 2012-07-31 19:51:34.133105918 +0200
|
|
@@ -20,9 +20,14 @@
|
|
config SOC_FALCON
|
|
bool "FALCON"
|
|
|
|
+config SOC_SVIP
|
|
+ bool "SVIP"
|
|
+ select MIPS_CPU_SCACHE
|
|
+
|
|
endchoice
|
|
|
|
source "arch/mips/lantiq/xway/Kconfig"
|
|
source "arch/mips/lantiq/falcon/Kconfig"
|
|
+source "arch/mips/lantiq/svip/Kconfig"
|
|
|
|
endif
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/Kconfig
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/Kconfig 2012-07-31 19:51:34.133105918 +0200
|
|
@@ -0,0 +1,16 @@
|
|
+if SOC_SVIP
|
|
+
|
|
+menu "Mips Machine"
|
|
+
|
|
+config LANTIQ_MACH_EASY33016
|
|
+ bool "Easy33016"
|
|
+ default y
|
|
+
|
|
+config LANTIQ_MACH_EASY336
|
|
+ select SYS_SUPPORTS_LITTLE_ENDIAN
|
|
+ bool "Easy336"
|
|
+ default y
|
|
+
|
|
+endmenu
|
|
+
|
|
+endif
|
|
Index: linux-3.3.8/arch/mips/lantiq/Makefile
|
|
===================================================================
|
|
--- linux-3.3.8.orig/arch/mips/lantiq/Makefile 2012-07-31 19:51:34.017105912 +0200
|
|
+++ linux-3.3.8/arch/mips/lantiq/Makefile 2012-07-31 19:51:34.133105918 +0200
|
|
@@ -10,3 +10,4 @@
|
|
|
|
obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
|
|
obj-$(CONFIG_SOC_FALCON) += falcon/
|
|
+obj-$(CONFIG_SOC_SVIP) += svip/
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/Makefile
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/Makefile 2012-07-31 19:51:34.133105918 +0200
|
|
@@ -0,0 +1,3 @@
|
|
+obj-y := devices.o prom.o reset.o clk-svip.o gpio.o dma.o switchip_setup.o pms.o mux.o
|
|
+obj-$(CONFIG_LANTIQ_MACH_EASY33016) += mach-easy33016.o
|
|
+obj-$(CONFIG_LANTIQ_MACH_EASY336) += mach-easy336.o
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/devices.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/devices.c 2012-07-31 19:51:34.137105918 +0200
|
|
@@ -0,0 +1,385 @@
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/mtd/physmap.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/reboot.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/leds.h>
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/reboot.h>
|
|
+#include <linux/time.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/gpio.h>
|
|
+#include <linux/leds.h>
|
|
+#include <linux/spi/spi.h>
|
|
+#include <linux/mtd/nand.h>
|
|
+
|
|
+#include <asm/bootinfo.h>
|
|
+#include <asm/irq.h>
|
|
+
|
|
+#include <lantiq.h>
|
|
+
|
|
+#include <base_reg.h>
|
|
+#include <sys1_reg.h>
|
|
+#include <sys2_reg.h>
|
|
+#include <ebu_reg.h>
|
|
+
|
|
+#include "devices.h"
|
|
+
|
|
+#include <lantiq_soc.h>
|
|
+#include <svip_mux.h>
|
|
+#include <svip_pms.h>
|
|
+
|
|
+/* ASC */
|
|
+void __init svip_register_asc(int port)
|
|
+{
|
|
+ switch (port) {
|
|
+ case 0:
|
|
+ ltq_register_asc(0);
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_ASC0);
|
|
+ break;
|
|
+ case 1:
|
|
+ ltq_register_asc(1);
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_ASC1);
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ };
|
|
+}
|
|
+
|
|
+/* Ethernet */
|
|
+static unsigned char svip_ethaddr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
|
|
+
|
|
+static struct platform_device ltq_mii = {
|
|
+ .name = "ifxmips_mii0",
|
|
+ .dev = {
|
|
+ .platform_data = svip_ethaddr,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int __init svip_set_ethaddr(char *str)
|
|
+{
|
|
+ sscanf(str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
|
|
+ &svip_ethaddr[0], &svip_ethaddr[1], &svip_ethaddr[2],
|
|
+ &svip_ethaddr[3], &svip_ethaddr[4], &svip_ethaddr[5]);
|
|
+ return 0;
|
|
+}
|
|
+__setup("ethaddr=", svip_set_ethaddr);
|
|
+
|
|
+void __init svip_register_eth(void)
|
|
+{
|
|
+ if (!is_valid_ether_addr(svip_ethaddr))
|
|
+ random_ether_addr(svip_ethaddr);
|
|
+
|
|
+ platform_device_register(<q_mii);
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_ETHSW);
|
|
+}
|
|
+
|
|
+/* Virtual Ethernet */
|
|
+static struct platform_device ltq_ve = {
|
|
+ .name = "ifxmips_svip_ve",
|
|
+};
|
|
+
|
|
+void __init svip_register_virtual_eth(void)
|
|
+{
|
|
+ platform_device_register(<q_ve);
|
|
+}
|
|
+
|
|
+/* SPI */
|
|
+static void __init ltq_register_ssc(int bus_num, unsigned long base, int irq_rx,
|
|
+ int irq_tx, int irq_err, int irq_frm)
|
|
+{
|
|
+ struct resource res[] = {
|
|
+ {
|
|
+ .name = "regs",
|
|
+ .start = base,
|
|
+ .end = base + 0x20 - 1,
|
|
+ .flags = IORESOURCE_MEM,
|
|
+ }, {
|
|
+ .name = "rx",
|
|
+ .start = irq_rx,
|
|
+ .flags = IORESOURCE_IRQ,
|
|
+ }, {
|
|
+ .name = "tx",
|
|
+ .start = irq_tx,
|
|
+ .flags = IORESOURCE_IRQ,
|
|
+ }, {
|
|
+ .name = "err",
|
|
+ .start = irq_err,
|
|
+ .flags = IORESOURCE_IRQ,
|
|
+ }, {
|
|
+ .name = "frm",
|
|
+ .start = irq_frm,
|
|
+ .flags = IORESOURCE_IRQ,
|
|
+ },
|
|
+ };
|
|
+
|
|
+ platform_device_register_simple("ifx_ssc", bus_num, res,
|
|
+ ARRAY_SIZE(res));
|
|
+}
|
|
+
|
|
+static struct spi_board_info bdinfo[] __initdata = {
|
|
+ {
|
|
+ .modalias = "xt16",
|
|
+ .mode = SPI_MODE_3,
|
|
+ .irq = INT_NUM_IM5_IRL0 + 28,
|
|
+ .max_speed_hz = 1000000,
|
|
+ .bus_num = 0,
|
|
+ .chip_select = 1,
|
|
+ },
|
|
+ {
|
|
+ .modalias = "xt16",
|
|
+ .mode = SPI_MODE_3,
|
|
+ .irq = INT_NUM_IM5_IRL0 + 19,
|
|
+ .max_speed_hz = 1000000,
|
|
+ .bus_num = 0,
|
|
+ .chip_select = 2,
|
|
+ },
|
|
+ {
|
|
+ .modalias = "loop",
|
|
+ .mode = SPI_MODE_0 | SPI_LOOP,
|
|
+ .irq = -1,
|
|
+ .max_speed_hz = 10000000,
|
|
+ .bus_num = 0,
|
|
+ .chip_select = 3,
|
|
+ },
|
|
+};
|
|
+
|
|
+void __init svip_register_spi(void)
|
|
+{
|
|
+
|
|
+ ltq_register_ssc(0, LTQ_SSC0_BASE, INT_NUM_IM1_IRL0 + 6,
|
|
+ INT_NUM_IM1_IRL0 + 7, INT_NUM_IM1_IRL0 + 8,
|
|
+ INT_NUM_IM1_IRL0 + 9);
|
|
+
|
|
+ ltq_register_ssc(1, LTQ_SSC1_BASE, INT_NUM_IM1_IRL0 + 10,
|
|
+ INT_NUM_IM1_IRL0 + 11, INT_NUM_IM1_IRL0 + 12,
|
|
+ INT_NUM_IM1_IRL0 + 13);
|
|
+
|
|
+ spi_register_board_info(bdinfo, ARRAY_SIZE(bdinfo));
|
|
+
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_SSC0 | SYS1_CLKENR_SSC1);
|
|
+}
|
|
+
|
|
+void __init svip_register_spi_flash(struct spi_board_info *bdinfo)
|
|
+{
|
|
+ spi_register_board_info(bdinfo, 1);
|
|
+}
|
|
+
|
|
+/* GPIO */
|
|
+static struct platform_device ltq_gpio = {
|
|
+ .name = "ifxmips_gpio",
|
|
+};
|
|
+
|
|
+static struct platform_device ltq_gpiodev = {
|
|
+ .name = "GPIODEV",
|
|
+};
|
|
+
|
|
+void __init svip_register_gpio(void)
|
|
+{
|
|
+ platform_device_register(<q_gpio);
|
|
+ platform_device_register(<q_gpiodev);
|
|
+}
|
|
+
|
|
+/* MUX */
|
|
+static struct ltq_mux_settings ltq_mux_settings;
|
|
+
|
|
+static struct platform_device ltq_mux = {
|
|
+ .name = "ltq_mux",
|
|
+ .dev = {
|
|
+ .platform_data = <q_mux_settings,
|
|
+ }
|
|
+};
|
|
+
|
|
+void __init svip_register_mux(const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS],
|
|
+ const struct ltq_mux_pin mux_p1[LTQ_MUX_P1_PINS],
|
|
+ const struct ltq_mux_pin mux_p2[LTQ_MUX_P2_PINS],
|
|
+ const struct ltq_mux_pin mux_p3[LTQ_MUX_P3_PINS],
|
|
+ const struct ltq_mux_pin mux_p4[LTQ_MUX_P4_PINS])
|
|
+{
|
|
+ ltq_mux_settings.mux_p0 = mux_p0;
|
|
+ ltq_mux_settings.mux_p1 = mux_p1;
|
|
+ ltq_mux_settings.mux_p2 = mux_p2;
|
|
+ ltq_mux_settings.mux_p3 = mux_p3;
|
|
+ ltq_mux_settings.mux_p4 = mux_p4;
|
|
+
|
|
+ if (mux_p0)
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT0);
|
|
+
|
|
+ if (mux_p1)
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT1);
|
|
+
|
|
+ if (mux_p2)
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT2);
|
|
+
|
|
+ if (mux_p3)
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT3);
|
|
+
|
|
+ if (mux_p4)
|
|
+ svip_sys2_clk_enable(SYS2_CLKENR_PORT4);
|
|
+
|
|
+ platform_device_register(<q_mux);
|
|
+}
|
|
+
|
|
+/* NAND */
|
|
+#define NAND_ADDR_REGION_BASE (LTQ_EBU_SEG1_BASE)
|
|
+#define NAND_CLE_BIT (1 << 3)
|
|
+#define NAND_ALE_BIT (1 << 2)
|
|
+
|
|
+static struct svip_reg_ebu *const ebu = (struct svip_reg_ebu *)LTQ_EBU_BASE;
|
|
+
|
|
+static int svip_nand_probe(struct platform_device *pdev)
|
|
+{
|
|
+ ebu_w32(LTQ_EBU_ADDR_SEL_0_BASE_VAL(CPHYSADDR(NAND_ADDR_REGION_BASE)
|
|
+ >> 12)
|
|
+ | LTQ_EBU_ADDR_SEL_0_MASK_VAL(15)
|
|
+ | LTQ_EBU_ADDR_SEL_0_MRME_VAL(0)
|
|
+ | LTQ_EBU_ADDR_SEL_0_REGEN_VAL(1),
|
|
+ addr_sel_0);
|
|
+
|
|
+ ebu_w32(LTQ_EBU_CON_0_WRDIS_VAL(0)
|
|
+ | LTQ_EBU_CON_0_ADSWP_VAL(1)
|
|
+ | LTQ_EBU_CON_0_AGEN_VAL(0x00)
|
|
+ | LTQ_EBU_CON_0_SETUP_VAL(1)
|
|
+ | LTQ_EBU_CON_0_WAIT_VAL(0x00)
|
|
+ | LTQ_EBU_CON_0_WINV_VAL(0)
|
|
+ | LTQ_EBU_CON_0_PW_VAL(0x00)
|
|
+ | LTQ_EBU_CON_0_ALEC_VAL(0)
|
|
+ | LTQ_EBU_CON_0_BCGEN_VAL(0x01)
|
|
+ | LTQ_EBU_CON_0_WAITWRC_VAL(1)
|
|
+ | LTQ_EBU_CON_0_WAITRDC_VAL(1)
|
|
+ | LTQ_EBU_CON_0_HOLDC_VAL(1)
|
|
+ | LTQ_EBU_CON_0_RECOVC_VAL(0)
|
|
+ | LTQ_EBU_CON_0_CMULT_VAL(0x01),
|
|
+ con_0);
|
|
+
|
|
+ /*
|
|
+ * ECC disabled
|
|
+ * CLE, ALE and CS are pulse, all other signal are latches based
|
|
+ * CLE and ALE are active high, PRE, WP, SE and CS/CE are active low
|
|
+ * OUT_CS_S is disabled
|
|
+ * NAND mode is disabled
|
|
+ */
|
|
+ ebu_w32(LTQ_EBU_NAND_CON_ECC_ON_VAL(0)
|
|
+ | LTQ_EBU_NAND_CON_LAT_EN_VAL(0x38)
|
|
+ | LTQ_EBU_NAND_CON_OUT_CS_S_VAL(0)
|
|
+ | LTQ_EBU_NAND_CON_IN_CS_S_VAL(0)
|
|
+ | LTQ_EBU_NAND_CON_PRE_P_VAL(1)
|
|
+ | LTQ_EBU_NAND_CON_WP_P_VAL(1)
|
|
+ | LTQ_EBU_NAND_CON_SE_P_VAL(1)
|
|
+ | LTQ_EBU_NAND_CON_CS_P_VAL(1)
|
|
+ | LTQ_EBU_NAND_CON_CLE_P_VAL(0)
|
|
+ | LTQ_EBU_NAND_CON_ALE_P_VAL(0)
|
|
+ | LTQ_EBU_NAND_CON_CSMUX_E_VAL(0)
|
|
+ | LTQ_EBU_NAND_CON_NANDMODE_VAL(0),
|
|
+ nand_con);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void svip_nand_hwcontrol(struct mtd_info *mtd, int cmd,
|
|
+ unsigned int ctrl)
|
|
+{
|
|
+ struct nand_chip *this = mtd->priv;
|
|
+
|
|
+ if (ctrl & NAND_CTRL_CHANGE) {
|
|
+ unsigned long adr;
|
|
+ /* Coming here means to change either the enable state or
|
|
+ * the address for controlling ALE or CLE */
|
|
+
|
|
+ /* NAND_NCE: Select the chip by setting nCE to low.
|
|
+ * This is done in CON register */
|
|
+ if (ctrl & NAND_NCE)
|
|
+ ebu_w32_mask(0, LTQ_EBU_NAND_CON_NANDMODE_VAL(1),
|
|
+ nand_con);
|
|
+ else
|
|
+ ebu_w32_mask(LTQ_EBU_NAND_CON_NANDMODE_VAL(1),
|
|
+ 0, nand_con);
|
|
+
|
|
+ /* The addressing of CLE or ALE is done via different addresses.
|
|
+ We are now changing the address depending on the given action
|
|
+ SVIPs NAND_CLE_BIT = (1 << 3), NAND_CLE = 0x02
|
|
+ NAND_ALE_BIT = (1 << 2) = NAND_ALE (0x04) */
|
|
+ adr = (unsigned long)this->IO_ADDR_W;
|
|
+ adr &= ~(NAND_CLE_BIT | NAND_ALE_BIT);
|
|
+ adr |= (ctrl & NAND_CLE) << 2 | (ctrl & NAND_ALE);
|
|
+ this->IO_ADDR_W = (void __iomem *)adr;
|
|
+ }
|
|
+
|
|
+ if (cmd != NAND_CMD_NONE)
|
|
+ writeb(cmd, this->IO_ADDR_W);
|
|
+}
|
|
+
|
|
+static int svip_nand_ready(struct mtd_info *mtd)
|
|
+{
|
|
+ return (ebu_r32(nand_wait) & 0x01) == 0x01;
|
|
+}
|
|
+
|
|
+static inline void svip_nand_wait(void)
|
|
+{
|
|
+ static const int nops = 150;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < nops; i++)
|
|
+ asm("nop");
|
|
+}
|
|
+
|
|
+static void svip_nand_write_buf(struct mtd_info *mtd,
|
|
+ const u_char *buf, int len)
|
|
+{
|
|
+ int i;
|
|
+ struct nand_chip *this = mtd->priv;
|
|
+
|
|
+ for (i = 0; i < len; i++) {
|
|
+ writeb(buf[i], this->IO_ADDR_W);
|
|
+ svip_nand_wait();
|
|
+ }
|
|
+}
|
|
+
|
|
+static void svip_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
|
|
+{
|
|
+ int i;
|
|
+ struct nand_chip *this = mtd->priv;
|
|
+
|
|
+ for (i = 0; i < len; i++) {
|
|
+ buf[i] = readb(this->IO_ADDR_R);
|
|
+ svip_nand_wait();
|
|
+ }
|
|
+}
|
|
+
|
|
+static const char *part_probes[] = { "cmdlinepart", NULL };
|
|
+
|
|
+static struct platform_nand_data svip_flash_nand_data = {
|
|
+ .chip = {
|
|
+ .nr_chips = 1,
|
|
+ .part_probe_types = part_probes,
|
|
+ },
|
|
+ .ctrl = {
|
|
+ .probe = svip_nand_probe,
|
|
+ .cmd_ctrl = svip_nand_hwcontrol,
|
|
+ .dev_ready = svip_nand_ready,
|
|
+ .write_buf = svip_nand_write_buf,
|
|
+ .read_buf = svip_nand_read_buf,
|
|
+ }
|
|
+};
|
|
+
|
|
+static struct resource svip_nand_resources[] = {
|
|
+ MEM_RES("nand", LTQ_FLASH_START, LTQ_FLASH_MAX),
|
|
+};
|
|
+
|
|
+static struct platform_device svip_flash_nand = {
|
|
+ .name = "gen_nand",
|
|
+ .id = -1,
|
|
+ .num_resources = ARRAY_SIZE(svip_nand_resources),
|
|
+ .resource = svip_nand_resources,
|
|
+ .dev = {
|
|
+ .platform_data = &svip_flash_nand_data,
|
|
+ },
|
|
+};
|
|
+
|
|
+void __init svip_register_nand(void)
|
|
+{
|
|
+ platform_device_register(&svip_flash_nand);
|
|
+}
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/clk-svip.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/clk-svip.c 2012-07-31 19:51:34.137105918 +0200
|
|
@@ -0,0 +1,100 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
|
|
+ */
|
|
+
|
|
+#include <linux/io.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/time.h>
|
|
+
|
|
+#include <asm/irq.h>
|
|
+#include <asm/div64.h>
|
|
+
|
|
+#include <lantiq_soc.h>
|
|
+#include <base_reg.h>
|
|
+#include <sys0_reg.h>
|
|
+#include <sys1_reg.h>
|
|
+#include <status_reg.h>
|
|
+
|
|
+static struct svip_reg_status *const status =
|
|
+(struct svip_reg_status *)LTQ_STATUS_BASE;
|
|
+static struct svip_reg_sys0 *const sys0 = (struct svip_reg_sys0 *)LTQ_SYS0_BASE;
|
|
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
|
|
+
|
|
+unsigned int ltq_svip_io_region_clock(void)
|
|
+{
|
|
+ return 200000000; /* 200 MHz */
|
|
+}
|
|
+EXPORT_SYMBOL(ltq_svip_io_region_clock);
|
|
+
|
|
+unsigned int ltq_svip_cpu_hz(void)
|
|
+{
|
|
+ /* Magic BootROM speed location... */
|
|
+ if ((*(u32 *)0x9fc07ff0) == 1)
|
|
+ return *(u32 *)0x9fc07ff4;
|
|
+
|
|
+ if (STATUS_CONFIG_CLK_MODE_GET(status_r32(config)) == 1) {
|
|
+ /* xT16 */
|
|
+ return 393216000;
|
|
+ } else {
|
|
+ switch (SYS0_PLL1CR_PLLDIV_GET(sys0_r32(pll1cr))) {
|
|
+ case 3:
|
|
+ return 475000000;
|
|
+ case 2:
|
|
+ return 450000000;
|
|
+ case 1:
|
|
+ return 425000000;
|
|
+ default:
|
|
+ return 400000000;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+EXPORT_SYMBOL(ltq_svip_cpu_hz);
|
|
+
|
|
+unsigned int ltq_svip_fpi_hz(void)
|
|
+{
|
|
+ u32 fbs0_div[2] = {4, 8};
|
|
+ u32 div;
|
|
+
|
|
+ div = SYS1_FPICR_FPIDIV_GET(sys1_r32(fpicr));
|
|
+ return ltq_svip_cpu_hz()/fbs0_div[div];
|
|
+}
|
|
+EXPORT_SYMBOL(ltq_svip_fpi_hz);
|
|
+
|
|
+unsigned int ltq_get_ppl_hz(void)
|
|
+{
|
|
+ /* Magic BootROM speed location... */
|
|
+ if ((*(u32 *)0x9fc07ff0) == 1)
|
|
+ return *(u32 *)0x9fc07ff4;
|
|
+
|
|
+ if (STATUS_CONFIG_CLK_MODE_GET(status_r32(config)) == 1) {
|
|
+ /* xT16 */
|
|
+ return 393216000;
|
|
+ } else {
|
|
+ switch (SYS0_PLL1CR_PLLDIV_GET(sys0_r32(pll1cr))) {
|
|
+ case 3:
|
|
+ return 475000000;
|
|
+ case 2:
|
|
+ return 450000000;
|
|
+ case 1:
|
|
+ return 425000000;
|
|
+ default:
|
|
+ return 400000000;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+unsigned int ltq_get_fbs0_hz(void)
|
|
+{
|
|
+ u32 fbs0_div[2] = {4, 8};
|
|
+ u32 div;
|
|
+
|
|
+ div = SYS1_FPICR_FPIDIV_GET(sys1_r32(fpicr));
|
|
+ return ltq_get_ppl_hz()/fbs0_div[div];
|
|
+}
|
|
+EXPORT_SYMBOL(ltq_get_fbs0_hz);
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/gpio.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/gpio.c 2012-07-31 19:51:34.137105918 +0200
|
|
@@ -0,0 +1,553 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published
|
|
+ * by the Free Software Foundation.
|
|
+ *
|
|
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/gpio.h>
|
|
+#include <linux/ioport.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/ioctl.h>
|
|
+#include <linux/timer.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/kobject.h>
|
|
+#include <linux/workqueue.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/netlink.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <net/sock.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/semaphore.h>
|
|
+
|
|
+#include <lantiq_soc.h>
|
|
+#include <svip_mux.h>
|
|
+#include <base_reg.h>
|
|
+#include <port_reg.h>
|
|
+
|
|
+#define DRV_NAME "ifxmips_gpio"
|
|
+
|
|
+int gpio_to_irq(unsigned int gpio)
|
|
+{
|
|
+ return -EINVAL;
|
|
+}
|
|
+EXPORT_SYMBOL(gpio_to_irq);
|
|
+
|
|
+int irq_to_gpio(unsigned int gpio)
|
|
+{
|
|
+ return -EINVAL;
|
|
+}
|
|
+EXPORT_SYMBOL(irq_to_gpio);
|
|
+
|
|
+struct ltq_port_base {
|
|
+ struct svip_reg_port *base;
|
|
+ u32 pins;
|
|
+};
|
|
+
|
|
+/* Base addresses for ports */
|
|
+static const struct ltq_port_base ltq_port_base[] = {
|
|
+ { (struct svip_reg_port *)LTQ_PORT_P0_BASE, 20 },
|
|
+ { (struct svip_reg_port *)LTQ_PORT_P1_BASE, 20 },
|
|
+ { (struct svip_reg_port *)LTQ_PORT_P2_BASE, 19 },
|
|
+ { (struct svip_reg_port *)LTQ_PORT_P3_BASE, 20 },
|
|
+ { (struct svip_reg_port *)LTQ_PORT_P4_BASE, 24 }
|
|
+};
|
|
+
|
|
+#define MAX_PORTS ARRAY_SIZE(ltq_port_base)
|
|
+#define PINS_PER_PORT(port) (ltq_port_base[port].pins)
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_exintcr0(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->exintcr0) | (1 << pin),
|
|
+ ltq_port_base[port].base->exintcr0);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_exintcr0(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->exintcr0) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->exintcr0);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_exintcr1(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->exintcr1) | (1 << pin),
|
|
+ ltq_port_base[port].base->exintcr1);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_exintcr1(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->exintcr1) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->exintcr1);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_irncfg(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->irncfg) | (1 << pin),
|
|
+ ltq_port_base[port].base->irncfg);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_irncfg(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->irncfg) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->irncfg);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_irnen(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(1 << pin, ltq_port_base[port].base->irnenset);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_irnen(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(1 << pin, ltq_port_base[port].base->irnenclr);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_dir_out(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->dir) | (1 << pin),
|
|
+ ltq_port_base[port].base->dir);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_dir_in(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->dir) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->dir);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_output(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->out) | (1 << pin),
|
|
+ ltq_port_base[port].base->out);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_output(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->out) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->out);
|
|
+}
|
|
+
|
|
+static inline
|
|
+int ltq_port_get_input(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return (port_r32(ltq_port_base[port].base->in) & (1 << pin)) == 0;
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_puen(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->puen) | (1 << pin),
|
|
+ ltq_port_base[port].base->puen);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_puen(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->puen) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->puen);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_altsel0(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->altsel0) | (1 << pin),
|
|
+ ltq_port_base[port].base->altsel0);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_altsel0(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->altsel0) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->altsel0);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_set_altsel1(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->altsel1) | (1 << pin),
|
|
+ ltq_port_base[port].base->altsel1);
|
|
+}
|
|
+
|
|
+static inline
|
|
+void ltq_port_clear_altsel1(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return;
|
|
+
|
|
+ port_w32(port_r32(ltq_port_base[port].base->altsel1) & ~(1 << pin),
|
|
+ ltq_port_base[port].base->altsel1);
|
|
+}
|
|
+
|
|
+void ltq_gpio_configure(int port, int pin, bool dirin, bool puen,
|
|
+ bool altsel0, bool altsel1)
|
|
+{
|
|
+ if (dirin)
|
|
+ ltq_port_set_dir_in(port, pin);
|
|
+ else
|
|
+ ltq_port_set_dir_out(port, pin);
|
|
+
|
|
+ if (puen)
|
|
+ ltq_port_set_puen(port, pin);
|
|
+ else
|
|
+ ltq_port_clear_puen(port, pin);
|
|
+
|
|
+ if (altsel0)
|
|
+ ltq_port_set_altsel0(port, pin);
|
|
+ else
|
|
+ ltq_port_clear_altsel0(port, pin);
|
|
+
|
|
+ if (altsel1)
|
|
+ ltq_port_set_altsel1(port, pin);
|
|
+ else
|
|
+ ltq_port_clear_altsel1(port, pin);
|
|
+}
|
|
+
|
|
+int ltq_port_get_dir(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return (port_r32(ltq_port_base[port].base->dir) & (1 << pin)) != 0;
|
|
+}
|
|
+
|
|
+int ltq_port_get_puden(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return (port_r32(ltq_port_base[port].base->puen) & (1 << pin)) != 0;
|
|
+}
|
|
+
|
|
+int ltq_port_get_altsel0(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return (port_r32(ltq_port_base[port].base->altsel0) & (1 << pin)) != 0;
|
|
+}
|
|
+
|
|
+int ltq_port_get_altsel1(unsigned int port, unsigned int pin)
|
|
+{
|
|
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return (port_r32(ltq_port_base[port].base->altsel1) & (1 << pin)) != 0;
|
|
+}
|
|
+
|
|
+struct ltq_gpio_port {
|
|
+ struct gpio_chip gpio_chip;
|
|
+ unsigned int irq_base;
|
|
+ unsigned int chained_irq;
|
|
+};
|
|
+
|
|
+static struct ltq_gpio_port ltq_gpio_port[MAX_PORTS];
|
|
+
|
|
+static int gpio_exported;
|
|
+static int __init gpio_export_setup(char *str)
|
|
+{
|
|
+ get_option(&str, &gpio_exported);
|
|
+ return 1;
|
|
+}
|
|
+__setup("gpio_exported=", gpio_export_setup);
|
|
+
|
|
+static inline unsigned int offset2port(unsigned int offset)
|
|
+{
|
|
+ unsigned int i;
|
|
+ unsigned int prev = 0;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(ltq_port_base); i++) {
|
|
+ if (offset >= prev &&
|
|
+ offset < prev + ltq_port_base[i].pins)
|
|
+ return i;
|
|
+
|
|
+ prev = ltq_port_base[i].pins;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline unsigned int offset2pin(unsigned int offset)
|
|
+{
|
|
+ unsigned int i;
|
|
+ unsigned int prev = 0;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(ltq_port_base); i++) {
|
|
+ if (offset >= prev &&
|
|
+ offset < prev + ltq_port_base[i].pins)
|
|
+ return offset - prev;
|
|
+
|
|
+ prev = ltq_port_base[i].pins;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
|
|
+{
|
|
+ ltq_port_set_dir_in(offset2port(offset), offset2pin(offset));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ltq_gpio_direction_output(struct gpio_chip *chip,
|
|
+ unsigned int offset, int value)
|
|
+{
|
|
+ ltq_port_set_dir_out(offset2port(offset), offset2pin(offset));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset)
|
|
+{
|
|
+ return ltq_port_get_input(offset2port(offset), offset2pin(offset));
|
|
+}
|
|
+
|
|
+static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
|
|
+{
|
|
+ if (value)
|
|
+ ltq_port_set_output(offset2port(offset), offset2pin(offset));
|
|
+ else
|
|
+ ltq_port_clear_output(offset2port(offset), offset2pin(offset));
|
|
+}
|
|
+
|
|
+static int svip_gpio_request(struct gpio_chip *chip, unsigned offset)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ltq_gpio_free(struct gpio_chip *chip, unsigned offset)
|
|
+{
|
|
+}
|
|
+
|
|
+static int ltq_gpio_probe(struct platform_device *pdev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct ltq_gpio_port *gpio_port;
|
|
+
|
|
+ if (pdev->id >= MAX_PORTS)
|
|
+ return -ENODEV;
|
|
+
|
|
+ gpio_port = <q_gpio_port[pdev->id];
|
|
+ gpio_port->gpio_chip.label = "ltq-gpio";
|
|
+
|
|
+ gpio_port->gpio_chip.direction_input = ltq_gpio_direction_input;
|
|
+ gpio_port->gpio_chip.direction_output = ltq_gpio_direction_output;
|
|
+ gpio_port->gpio_chip.get = ltq_gpio_get;
|
|
+ gpio_port->gpio_chip.set = ltq_gpio_set;
|
|
+ gpio_port->gpio_chip.request = svip_gpio_request;
|
|
+ gpio_port->gpio_chip.free = ltq_gpio_free;
|
|
+ gpio_port->gpio_chip.base = 100 * pdev->id;
|
|
+ gpio_port->gpio_chip.ngpio = 32;
|
|
+ gpio_port->gpio_chip.dev = &pdev->dev;
|
|
+ gpio_port->gpio_chip.exported = gpio_exported;
|
|
+
|
|
+ ret = gpiochip_add(&gpio_port->gpio_chip);
|
|
+ if (ret < 0) {
|
|
+ dev_err(&pdev->dev, "Could not register gpiochip %d, %d\n",
|
|
+ pdev->id, ret);
|
|
+ goto err;
|
|
+ }
|
|
+ platform_set_drvdata(pdev, gpio_port);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int ltq_gpio_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct ltq_gpio_port *gpio_port = platform_get_drvdata(pdev);
|
|
+ int ret;
|
|
+
|
|
+ ret = gpiochip_remove(&gpio_port->gpio_chip);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static struct platform_driver ltq_gpio_driver = {
|
|
+ .probe = ltq_gpio_probe,
|
|
+ .remove = __devexit_p(ltq_gpio_remove),
|
|
+ .driver = {
|
|
+ .name = DRV_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
+ },
|
|
+};
|
|
+
|
|
+int __init ltq_gpio_init(void)
|
|
+{
|
|
+ int ret = platform_driver_register(<q_gpio_driver);
|
|
+ if (ret)
|
|
+ printk(KERN_INFO DRV_NAME
|
|
+ ": Error registering platform driver!");
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+postcore_initcall(ltq_gpio_init);
|
|
+
|
|
+/**
|
|
+ * Convert interrupt number to corresponding port/pin pair
|
|
+ * Returns the port/pin pair serving the selected external interrupt;
|
|
+ * needed since mapping not linear.
|
|
+ *
|
|
+ * \param exint External interrupt number
|
|
+ * \param port Pointer for resulting port
|
|
+ * \param pin Pointer for resutling pin
|
|
+ * \return -EINVAL Invalid exint
|
|
+ * \return 0 port/pin updated
|
|
+ * \ingroup API
|
|
+ */
|
|
+static int ltq_exint2port(u32 exint, int *port, int *pin)
|
|
+{
|
|
+ if ((exint >= 0) && (exint <= 10)) {
|
|
+ *port = 0;
|
|
+ *pin = exint + 7;
|
|
+ } else if ((exint >= 11) && (exint <= 14)) {
|
|
+ *port = 1;
|
|
+ *pin = 18 - (exint - 11) ;
|
|
+ } else if (exint == 15) {
|
|
+ *port = 1;
|
|
+ *pin = 19;
|
|
+ } else if (exint == 16) {
|
|
+ *port = 0;
|
|
+ *pin = 19;
|
|
+ } else {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Enable external interrupt.
|
|
+ * This function enables an external interrupt and sets the given mode.
|
|
+ * valid values for mode are:
|
|
+ * - 0 = Interrupt generation disabled
|
|
+ * - 1 = Interrupt on rising edge
|
|
+ * - 2 = Interrupt on falling edge
|
|
+ * - 3 = Interrupt on rising and falling edge
|
|
+ * - 5 = Interrupt on high level detection
|
|
+ * - 6 = Interrupt on low level detection
|
|
+ *
|
|
+ * \param exint - Number of external interrupt
|
|
+ * \param mode - Trigger mode
|
|
+ * \return 0 on success
|
|
+ * \ingroup API
|
|
+ */
|
|
+int ifx_enable_external_int(u32 exint, u32 mode)
|
|
+{
|
|
+ int port;
|
|
+ int pin;
|
|
+
|
|
+ if ((mode < 0) || (mode > 6))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (ltq_exint2port(exint, &port, &pin))
|
|
+ return -EINVAL;
|
|
+
|
|
+ ltq_port_clear_exintcr0(port, pin);
|
|
+ ltq_port_clear_exintcr1(port, pin);
|
|
+ ltq_port_clear_irncfg(port, pin);
|
|
+
|
|
+ if (mode & 0x1)
|
|
+ ltq_port_set_exintcr0(port, pin);
|
|
+ if (mode & 0x2)
|
|
+ ltq_port_set_exintcr1(port, pin);
|
|
+ if (mode & 0x4)
|
|
+ ltq_port_set_irncfg(port, pin);
|
|
+
|
|
+ ltq_port_set_irnen(port, pin);
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(ifx_enable_external_int);
|
|
+
|
|
+/**
|
|
+ * Disable external interrupt.
|
|
+ * This function disables an external interrupt and sets mode to 0x00.
|
|
+ *
|
|
+ * \param exint - Number of external interrupt
|
|
+ * \return 0 on success
|
|
+ * \ingroup API
|
|
+ */
|
|
+int ifx_disable_external_int(u32 exint)
|
|
+{
|
|
+ int port;
|
|
+ int pin;
|
|
+
|
|
+ if (ltq_exint2port(exint, &port, &pin))
|
|
+ return -EINVAL;
|
|
+
|
|
+ ltq_port_clear_irnen(port, pin);
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(ifx_disable_external_int);
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/prom.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/prom.c 2012-07-31 19:51:34.137105918 +0200
|
|
@@ -0,0 +1,73 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/time.h>
|
|
+#include <asm/bootinfo.h>
|
|
+
|
|
+#include <lantiq_soc.h>
|
|
+
|
|
+#include "../prom.h"
|
|
+#include "../clk.h"
|
|
+#include "../machtypes.h"
|
|
+
|
|
+#include <base_reg.h>
|
|
+#include <ebu_reg.h>
|
|
+
|
|
+#define SOC_SVIP "SVIP"
|
|
+
|
|
+#define PART_SHIFT 12
|
|
+#define PART_MASK 0x0FFFF000
|
|
+#define REV_SHIFT 28
|
|
+#define REV_MASK 0xF0000000
|
|
+
|
|
+static struct svip_reg_ebu *const ebu = (struct svip_reg_ebu *)LTQ_EBU_BASE;
|
|
+
|
|
+void __init ltq_soc_init(void)
|
|
+{
|
|
+ clkdev_add_static(ltq_svip_cpu_hz(), ltq_svip_fpi_hz(),
|
|
+ ltq_svip_io_region_clock());
|
|
+}
|
|
+
|
|
+void __init
|
|
+ltq_soc_setup(void)
|
|
+{
|
|
+ if (mips_machtype == LANTIQ_MACH_EASY33016 ||
|
|
+ mips_machtype == LANTIQ_MACH_EASY336) {
|
|
+ ebu_w32(0x120000f1, addr_sel_2);
|
|
+ ebu_w32(LTQ_EBU_CON_0_ADSWP |
|
|
+ LTQ_EBU_CON_0_SETUP |
|
|
+ LTQ_EBU_CON_0_BCGEN_VAL(0x02) |
|
|
+ LTQ_EBU_CON_0_WAITWRC_VAL(7) |
|
|
+ LTQ_EBU_CON_0_WAITRDC_VAL(3) |
|
|
+ LTQ_EBU_CON_0_HOLDC_VAL(3) |
|
|
+ LTQ_EBU_CON_0_RECOVC_VAL(3) |
|
|
+ LTQ_EBU_CON_0_CMULT_VAL(3), con_2);
|
|
+ }
|
|
+}
|
|
+
|
|
+void __init
|
|
+ltq_soc_detect(struct ltq_soc_info *i)
|
|
+{
|
|
+ i->partnum = (ltq_r32(LTQ_STATUS_CHIPID) & PART_MASK) >> PART_SHIFT;
|
|
+ i->rev = (ltq_r32(LTQ_STATUS_CHIPID) & REV_MASK) >> REV_SHIFT;
|
|
+ sprintf(i->rev_type, "1.%d", i->rev);
|
|
+ switch (i->partnum) {
|
|
+ case SOC_ID_SVIP:
|
|
+ i->name = SOC_SVIP;
|
|
+ i->type = SOC_TYPE_SVIP;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ printk(KERN_ERR "unknown partnum : 0x%08X\n", i->partnum);
|
|
+ while (1);
|
|
+ break;
|
|
+ }
|
|
+}
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/reset.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/reset.c 2012-07-31 19:51:34.137105918 +0200
|
|
@@ -0,0 +1,95 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published
|
|
+ * by the Free Software Foundation.
|
|
+ *
|
|
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
|
|
+ */
|
|
+
|
|
+#include <linux/init.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/ioport.h>
|
|
+#include <linux/pm.h>
|
|
+#include <linux/module.h>
|
|
+#include <asm/reboot.h>
|
|
+
|
|
+#include <lantiq_soc.h>
|
|
+#include "../machtypes.h"
|
|
+#include <base_reg.h>
|
|
+#include <sys1_reg.h>
|
|
+#include <boot_reg.h>
|
|
+#include <ebu_reg.h>
|
|
+
|
|
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
|
|
+static struct svip_reg_ebu *const ebu = (struct svip_reg_ebu *)LTQ_EBU_BASE;
|
|
+
|
|
+#define CPLD_CMDREG3 ((volatile unsigned char*)(KSEG1 + 0x120000f3))
|
|
+extern void switchip_reset(void);
|
|
+
|
|
+static void ltq_machine_restart(char *command)
|
|
+{
|
|
+ printk(KERN_NOTICE "System restart\n");
|
|
+ local_irq_disable();
|
|
+
|
|
+ if (mips_machtype == LANTIQ_MACH_EASY33016 ||
|
|
+ mips_machtype == LANTIQ_MACH_EASY336) {
|
|
+ /* We just use the CPLD function to reset the entire system as a
|
|
+ workaround for the switch reset problem */
|
|
+ local_irq_disable();
|
|
+ ebu_w32(0x120000f1, addr_sel_2);
|
|
+ ebu_w32(0x404027ff, con_2);
|
|
+
|
|
+ if (mips_machtype == LANTIQ_MACH_EASY336)
|
|
+ /* set bit 0 to reset SVIP */
|
|
+ *CPLD_CMDREG3 = (1<<0);
|
|
+ else
|
|
+ /* set bit 7 to reset SVIP, set bit 3 to reset xT */
|
|
+ *CPLD_CMDREG3 = (1<<7) | (1<<3);
|
|
+ } else {
|
|
+ *LTQ_BOOT_RVEC(0) = 0;
|
|
+ /* reset all except PER, SUBSYS and CPU0 */
|
|
+ sys1_w32(0x00043F3E, rreqr);
|
|
+ /* release WDT0 reset */
|
|
+ sys1_w32(0x00000100, rrlsr);
|
|
+ /* restore reset value for clock enables */
|
|
+ sys1_w32(~0x0c000040, clkclr);
|
|
+ /* reset SUBSYS (incl. DDR2) and CPU0 */
|
|
+ sys1_w32(0x00030001, rbtr);
|
|
+ }
|
|
+
|
|
+ for (;;)
|
|
+ ;
|
|
+}
|
|
+
|
|
+static void ltq_machine_halt(void)
|
|
+{
|
|
+ printk(KERN_NOTICE "System halted.\n");
|
|
+ local_irq_disable();
|
|
+ for (;;)
|
|
+ ;
|
|
+}
|
|
+
|
|
+static void ltq_machine_power_off(void)
|
|
+{
|
|
+ printk(KERN_NOTICE "Please turn off the power now.\n");
|
|
+ local_irq_disable();
|
|
+ for (;;)
|
|
+ ;
|
|
+}
|
|
+
|
|
+/* This function is used by the watchdog driver */
|
|
+int ltq_reset_cause(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ltq_reset_cause);
|
|
+
|
|
+static int __init mips_reboot_setup(void)
|
|
+{
|
|
+ _machine_restart = ltq_machine_restart;
|
|
+ _machine_halt = ltq_machine_halt;
|
|
+ pm_power_off = ltq_machine_power_off;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+arch_initcall(mips_reboot_setup);
|
|
Index: linux-3.3.8/arch/mips/lantiq/machtypes.h
|
|
===================================================================
|
|
--- linux-3.3.8.orig/arch/mips/lantiq/machtypes.h 2012-07-31 19:51:33.989105912 +0200
|
|
+++ linux-3.3.8/arch/mips/lantiq/machtypes.h 2012-07-31 19:51:34.137105918 +0200
|
|
@@ -16,6 +16,12 @@
|
|
LTQ_MACH_EASY50712, /* Danube evaluation board */
|
|
LTQ_MACH_EASY50601, /* Amazon SE evaluation board */
|
|
|
|
+ /* SVIP */
|
|
+ LANTIQ_MACH_EASY33016, /* SVIP Easy33016 */
|
|
+ LANTIQ_MACH_EASY336, /* SVIP Easy336, NOR Flash */
|
|
+ LANTIQ_MACH_EASY336SF, /* SVIP Easy336, Serial Flash */
|
|
+ LANTIQ_MACH_EASY336NAND, /* SVIP Easy336, NAND Flash */
|
|
+
|
|
/* FALCON */
|
|
LANTIQ_MACH_EASY98000, /* Falcon Eval Board, NOR Flash */
|
|
LANTIQ_MACH_EASY98000SF, /* Falcon Eval Board, Serial Flash */
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/mach-easy33016.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/mach-easy33016.c 2012-07-31 19:51:34.137105918 +0200
|
|
@@ -0,0 +1,73 @@
|
|
+#include <linux/init.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/leds.h>
|
|
+#include <linux/gpio.h>
|
|
+#include <linux/gpio_buttons.h>
|
|
+#include <linux/mtd/mtd.h>
|
|
+#include <linux/mtd/partitions.h>
|
|
+#include <linux/input.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/spi/spi.h>
|
|
+#include <linux/spi/flash.h>
|
|
+#include "../machtypes.h"
|
|
+
|
|
+#include <sys1_reg.h>
|
|
+#include <sys2_reg.h>
|
|
+#include <svip_pms.h>
|
|
+
|
|
+#include "devices.h"
|
|
+
|
|
+static const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS] = {
|
|
+ LTQ_MUX_P0_0_SSC0_MTSR,
|
|
+ LTQ_MUX_P0_1_SSC0_MRST,
|
|
+ LTQ_MUX_P0_2_SSC0_SCLK,
|
|
+ LTQ_MUX_P0_3_SSC1_MTSR,
|
|
+ LTQ_MUX_P0_4_SSC1_MRST,
|
|
+ LTQ_MUX_P0_5_SSC1_SCLK,
|
|
+ LTQ_MUX_P0_6_SSC0_CS0,
|
|
+ LTQ_MUX_P0_7_SSC0_CS1,
|
|
+ LTQ_MUX_P0_8_SSC0_CS2,
|
|
+ LTQ_MUX_P0_9,
|
|
+ LTQ_MUX_P0_10,
|
|
+ LTQ_MUX_P0_11_EXINT4,
|
|
+ LTQ_MUX_P0_12,
|
|
+ LTQ_MUX_P0_13,
|
|
+ LTQ_MUX_P0_14_ASC0_TXD,
|
|
+ LTQ_MUX_P0_15_ASC0_RXD,
|
|
+ LTQ_MUX_P0_16_EXINT9,
|
|
+ LTQ_MUX_P0_17_EXINT10,
|
|
+ LTQ_MUX_P0_18_EJ_BRKIN,
|
|
+ LTQ_MUX_P0_19_EXINT16
|
|
+};
|
|
+
|
|
+static void __init easy33016_init(void)
|
|
+{
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_L2C |
|
|
+ SYS1_CLKENR_DDR2 |
|
|
+ SYS1_CLKENR_SMI2 |
|
|
+ SYS1_CLKENR_SMI1 |
|
|
+ SYS1_CLKENR_SMI0 |
|
|
+ SYS1_CLKENR_FMI0 |
|
|
+ SYS1_CLKENR_DMA |
|
|
+ SYS1_CLKENR_SSC0 |
|
|
+ SYS1_CLKENR_SSC1 |
|
|
+ SYS1_CLKENR_EBU);
|
|
+
|
|
+ svip_sys2_clk_enable(SYS2_CLKENR_HWSYNC |
|
|
+ SYS2_CLKENR_MBS |
|
|
+ SYS2_CLKENR_SWINT);
|
|
+
|
|
+ svip_register_mux(mux_p0, NULL, NULL, NULL, NULL);
|
|
+ svip_register_asc(0);
|
|
+ svip_register_eth();
|
|
+ svip_register_virtual_eth();
|
|
+ ltq_register_wdt();
|
|
+ svip_register_gpio();
|
|
+ svip_register_spi();
|
|
+ svip_register_nand();
|
|
+}
|
|
+
|
|
+MIPS_MACHINE(LANTIQ_MACH_EASY33016,
|
|
+ "EASY33016",
|
|
+ "EASY33016",
|
|
+ easy33016_init);
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/mach-easy336.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/mach-easy336.c 2012-07-31 19:51:34.141105918 +0200
|
|
@@ -0,0 +1,221 @@
|
|
+#include <linux/init.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/leds.h>
|
|
+#include <linux/gpio.h>
|
|
+#include <linux/gpio_buttons.h>
|
|
+#include <linux/mtd/mtd.h>
|
|
+#include <linux/mtd/partitions.h>
|
|
+#include <linux/input.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/spi/spi.h>
|
|
+#include <linux/spi/flash.h>
|
|
+#include "../machtypes.h"
|
|
+
|
|
+#include <sys1_reg.h>
|
|
+#include <sys2_reg.h>
|
|
+#include <svip_pms.h>
|
|
+
|
|
+#include "devices.h"
|
|
+
|
|
+static struct mtd_partition easy336_sflash_partitions[] = {
|
|
+ {
|
|
+ .name = "SPI flash",
|
|
+ .size = MTDPART_SIZ_FULL,
|
|
+ .offset = 0,
|
|
+ },
|
|
+};
|
|
+
|
|
+static struct flash_platform_data easy336_sflash_data = {
|
|
+ .name = "m25p32",
|
|
+ .parts = (void *)&easy336_sflash_partitions,
|
|
+ .nr_parts = ARRAY_SIZE(easy336_sflash_partitions),
|
|
+ .type = "m25p32",
|
|
+};
|
|
+
|
|
+static struct spi_board_info bdinfo[] __initdata = {
|
|
+ {
|
|
+ .modalias = "m25p80",
|
|
+ .platform_data = &easy336_sflash_data,
|
|
+ .mode = SPI_MODE_0,
|
|
+ .irq = -1,
|
|
+ .max_speed_hz = 25000000,
|
|
+ .bus_num = 0,
|
|
+ .chip_select = 0,
|
|
+ }
|
|
+};
|
|
+
|
|
+static struct mtd_partition easy336_partitions[] = {
|
|
+ {
|
|
+ .name = "uboot",
|
|
+ .offset = 0x0,
|
|
+ .size = 0x40000,
|
|
+ },
|
|
+ {
|
|
+ .name = "uboot_env",
|
|
+ .offset = 0x40000,
|
|
+ .size = 0x20000,
|
|
+ },
|
|
+ {
|
|
+ .name = "linux",
|
|
+ .offset = 0x60000,
|
|
+ .size = 0x1a0000,
|
|
+ },
|
|
+ {
|
|
+ .name = "rootfs",
|
|
+ .offset = 0x200000,
|
|
+ .size = 0x500000,
|
|
+ },
|
|
+};
|
|
+
|
|
+static struct physmap_flash_data easy336_flash_data = {
|
|
+ .nr_parts = ARRAY_SIZE(easy336_partitions),
|
|
+ .parts = easy336_partitions,
|
|
+};
|
|
+
|
|
+static const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS] = {
|
|
+ LTQ_MUX_P0_0_SSC0_MTSR,
|
|
+ LTQ_MUX_P0_1_SSC0_MRST,
|
|
+ LTQ_MUX_P0_2_SSC0_SCLK,
|
|
+ LTQ_MUX_P0_3_SSC1_MTSR,
|
|
+ LTQ_MUX_P0_4_SSC1_MRST,
|
|
+ LTQ_MUX_P0_5_SSC1_SCLK,
|
|
+ LTQ_MUX_P0_6_SSC0_CS0,
|
|
+ LTQ_MUX_P0_7_SSC0_CS1,
|
|
+ LTQ_MUX_P0_8_SSC0_CS2,
|
|
+ LTQ_MUX_P0_9_SSC0_CS3,
|
|
+ LTQ_MUX_P0_10_SSC0_CS4,
|
|
+ LTQ_MUX_P0_11_SSC0_CS5,
|
|
+ LTQ_MUX_P0_12_EXINT5,
|
|
+ LTQ_MUX_P0_13_EXINT6,
|
|
+ LTQ_MUX_P0_14_ASC0_TXD,
|
|
+ LTQ_MUX_P0_15_ASC0_RXD,
|
|
+ LTQ_MUX_P0_16_EXINT9,
|
|
+ LTQ_MUX_P0_17_EXINT10,
|
|
+ LTQ_MUX_P0_18_EJ_BRKIN,
|
|
+ LTQ_MUX_P0_19_EXINT16
|
|
+};
|
|
+
|
|
+static const struct ltq_mux_pin mux_p2[LTQ_MUX_P2_PINS] = {
|
|
+ LTQ_MUX_P2_0_EBU_A0,
|
|
+ LTQ_MUX_P2_1_EBU_A1,
|
|
+ LTQ_MUX_P2_2_EBU_A2,
|
|
+ LTQ_MUX_P2_3_EBU_A3,
|
|
+ LTQ_MUX_P2_4_EBU_A4,
|
|
+ LTQ_MUX_P2_5_EBU_A5,
|
|
+ LTQ_MUX_P2_6_EBU_A6,
|
|
+ LTQ_MUX_P2_7_EBU_A7,
|
|
+ LTQ_MUX_P2_8_EBU_A8,
|
|
+ LTQ_MUX_P2_9_EBU_A9,
|
|
+ LTQ_MUX_P2_10_EBU_A10,
|
|
+ LTQ_MUX_P2_11_EBU_A11,
|
|
+ LTQ_MUX_P2_12_EBU_RD,
|
|
+ LTQ_MUX_P2_13_EBU_WR,
|
|
+ LTQ_MUX_P2_14_EBU_ALE,
|
|
+ LTQ_MUX_P2_15_EBU_WAIT,
|
|
+ LTQ_MUX_P2_16_EBU_RDBY,
|
|
+ LTQ_MUX_P2_17_EBU_BC0,
|
|
+ LTQ_MUX_P2_18_EBU_BC1
|
|
+};
|
|
+
|
|
+static const struct ltq_mux_pin mux_p3[LTQ_MUX_P3_PINS] = {
|
|
+ LTQ_MUX_P3_0_EBU_AD0,
|
|
+ LTQ_MUX_P3_1_EBU_AD1,
|
|
+ LTQ_MUX_P3_2_EBU_AD2,
|
|
+ LTQ_MUX_P3_3_EBU_AD3,
|
|
+ LTQ_MUX_P3_4_EBU_AD4,
|
|
+ LTQ_MUX_P3_5_EBU_AD5,
|
|
+ LTQ_MUX_P3_6_EBU_AD6,
|
|
+ LTQ_MUX_P3_7_EBU_AD7,
|
|
+ LTQ_MUX_P3_8_EBU_AD8,
|
|
+ LTQ_MUX_P3_9_EBU_AD9,
|
|
+ LTQ_MUX_P3_10_EBU_AD10,
|
|
+ LTQ_MUX_P3_11_EBU_AD11,
|
|
+ LTQ_MUX_P3_12_EBU_AD12,
|
|
+ LTQ_MUX_P3_13_EBU_AD13,
|
|
+ LTQ_MUX_P3_14_EBU_AD14,
|
|
+ LTQ_MUX_P3_15_EBU_AD15,
|
|
+ LTQ_MUX_P3_16_EBU_CS0,
|
|
+ LTQ_MUX_P3_17_EBU_CS1,
|
|
+ LTQ_MUX_P3_18_EBU_CS2,
|
|
+ LTQ_MUX_P3_19_EBU_CS3
|
|
+};
|
|
+
|
|
+static void __init easy336_init_common(void)
|
|
+{
|
|
+ svip_sys1_clk_enable(SYS1_CLKENR_L2C |
|
|
+ SYS1_CLKENR_DDR2 |
|
|
+ SYS1_CLKENR_SMI2 |
|
|
+ SYS1_CLKENR_SMI1 |
|
|
+ SYS1_CLKENR_SMI0 |
|
|
+ SYS1_CLKENR_FMI0 |
|
|
+ SYS1_CLKENR_DMA |
|
|
+ SYS1_CLKENR_GPTC |
|
|
+ SYS1_CLKENR_EBU);
|
|
+
|
|
+ svip_sys2_clk_enable(SYS2_CLKENR_HWSYNC |
|
|
+ SYS2_CLKENR_MBS |
|
|
+ SYS2_CLKENR_SWINT |
|
|
+ SYS2_CLKENR_HWACC3 |
|
|
+ SYS2_CLKENR_HWACC2 |
|
|
+ SYS2_CLKENR_HWACC1 |
|
|
+ SYS2_CLKENR_HWACC0 |
|
|
+ SYS2_CLKENR_SIF7 |
|
|
+ SYS2_CLKENR_SIF6 |
|
|
+ SYS2_CLKENR_SIF5 |
|
|
+ SYS2_CLKENR_SIF4 |
|
|
+ SYS2_CLKENR_SIF3 |
|
|
+ SYS2_CLKENR_SIF2 |
|
|
+ SYS2_CLKENR_SIF1 |
|
|
+ SYS2_CLKENR_SIF0 |
|
|
+ SYS2_CLKENR_DFEV7 |
|
|
+ SYS2_CLKENR_DFEV6 |
|
|
+ SYS2_CLKENR_DFEV5 |
|
|
+ SYS2_CLKENR_DFEV4 |
|
|
+ SYS2_CLKENR_DFEV3 |
|
|
+ SYS2_CLKENR_DFEV2 |
|
|
+ SYS2_CLKENR_DFEV1 |
|
|
+ SYS2_CLKENR_DFEV0);
|
|
+
|
|
+ svip_register_mux(mux_p0, NULL, mux_p2, mux_p3, NULL);
|
|
+ svip_register_asc(0);
|
|
+ svip_register_eth();
|
|
+ svip_register_virtual_eth();
|
|
+ /* ltq_register_wdt(); - conflicts with lq_switch */
|
|
+ svip_register_gpio();
|
|
+ svip_register_spi();
|
|
+ ltq_register_tapi();
|
|
+}
|
|
+
|
|
+static void __init easy336_init(void)
|
|
+{
|
|
+ easy336_init_common();
|
|
+ ltq_register_nor(&easy336_flash_data);
|
|
+}
|
|
+
|
|
+static void __init easy336sf_init(void)
|
|
+{
|
|
+ easy336_init_common();
|
|
+ svip_register_spi_flash(bdinfo);
|
|
+}
|
|
+
|
|
+static void __init easy336nand_init(void)
|
|
+{
|
|
+ easy336_init_common();
|
|
+ svip_register_nand();
|
|
+}
|
|
+
|
|
+MIPS_MACHINE(LANTIQ_MACH_EASY336,
|
|
+ "EASY336",
|
|
+ "EASY336",
|
|
+ easy336_init);
|
|
+
|
|
+MIPS_MACHINE(LANTIQ_MACH_EASY336SF,
|
|
+ "EASY336SF",
|
|
+ "EASY336 (Serial Flash)",
|
|
+ easy336sf_init);
|
|
+
|
|
+MIPS_MACHINE(LANTIQ_MACH_EASY336NAND,
|
|
+ "EASY336NAND",
|
|
+ "EASY336 (NAND Flash)",
|
|
+ easy336nand_init);
|
|
+
|
|
Index: linux-3.3.8/drivers/net/ethernet/svip_virtual_eth.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/drivers/net/ethernet/svip_virtual_eth.c 2012-07-31 19:51:34.141105918 +0200
|
|
@@ -0,0 +1,346 @@
|
|
+/******************************************************************************
|
|
+
|
|
+ Copyright (c) 2007
|
|
+ Infineon Technologies AG
|
|
+ Am Campeon 1-12; 81726 Munich, Germany
|
|
+
|
|
+ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE,
|
|
+ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS
|
|
+ SOFTWARE IS FREE OF CHARGE.
|
|
+
|
|
+ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS
|
|
+ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING
|
|
+ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP,
|
|
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE
|
|
+ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD
|
|
+ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL
|
|
+ PROPERTY INFRINGEMENT.
|
|
+
|
|
+ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT
|
|
+ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM
|
|
+ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
+ DEALINGS IN THE SOFTWARE.
|
|
+
|
|
+ ****************************************************************************
|
|
+Module : svip_virtual_eth.c
|
|
+
|
|
+Description : This file contains network driver implementation for a
|
|
+Virtual Ethernet interface. The Virtual Ethernet interface
|
|
+is part of Infineon's VINETIC-SVIP Linux BSP.
|
|
+ *******************************************************************************/
|
|
+#include <linux/module.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/init.h>
|
|
+
|
|
+#define SVIP_VETH_VER_STR "3.0"
|
|
+#define SVIP_VETH_INFO_STR \
|
|
+ "@(#)SVIP virtual ethernet interface, version " SVIP_VETH_VER_STR
|
|
+
|
|
+/******************************************************************************
|
|
+ * Local define/macro definitions
|
|
+ ******************************************************************************/
|
|
+struct svip_ve_priv
|
|
+{
|
|
+ struct net_device_stats stats;
|
|
+};
|
|
+
|
|
+/******************************************************************************
|
|
+ * Global function declarations
|
|
+ ******************************************************************************/
|
|
+int svip_ve_rx(struct sk_buff *skb);
|
|
+
|
|
+/******************************************************************************
|
|
+ * Local variable declarations
|
|
+ ******************************************************************************/
|
|
+static struct net_device *svip_ve_dev;
|
|
+static int watchdog_timeout = 10*HZ;
|
|
+static int (*svip_ve_mps_xmit)(struct sk_buff *skb) = NULL;
|
|
+
|
|
+
|
|
+/******************************************************************************
|
|
+ * Global function declarations
|
|
+ ******************************************************************************/
|
|
+
|
|
+/**
|
|
+ * Called by MPS driver to register a transmit routine called for each outgoing
|
|
+ * VoFW0 message.
|
|
+ *
|
|
+ * \param mps_xmit pointer to transmit routine
|
|
+ *
|
|
+ * \return none
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+void register_mps_xmit_routine(int (*mps_xmit)(struct sk_buff *skb))
|
|
+{
|
|
+ svip_ve_mps_xmit = mps_xmit;
|
|
+}
|
|
+EXPORT_SYMBOL(register_mps_xmit_routine);
|
|
+
|
|
+/**
|
|
+ * Returns a pointer to the routine used to deliver an incoming packet/message
|
|
+ * from the MPS mailbox to the networking layer. This routine is called by MPS
|
|
+ * driver during initialisation time.
|
|
+ *
|
|
+ * \param skb pointer to incoming socket buffer
|
|
+ *
|
|
+ * \return svip_ve_rx pointer to incoming messages delivering routine
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int (*register_mps_recv_routine(void)) (struct sk_buff *skb)
|
|
+{
|
|
+ return svip_ve_rx;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Used to deliver outgoing packets to VoFW0 module through the MPS driver.
|
|
+ * Upon loading/initialisation the MPS driver is registering a transmitting
|
|
+ * routine, which is called here to deliver the packet to the VoFW0 module.
|
|
+ *
|
|
+ * \param skb pointer to skb containing outgoing data
|
|
+ * \param dev pointer to this networking device's data
|
|
+ *
|
|
+ * \return 0 on success
|
|
+ * \return non-zero on error
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+static int svip_ve_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
+{
|
|
+ int err;
|
|
+ struct svip_ve_priv *priv = netdev_priv(dev);
|
|
+ struct net_device_stats *stats = &priv->stats;
|
|
+
|
|
+ stats->tx_packets++;
|
|
+ stats->tx_bytes += skb->len;
|
|
+
|
|
+ if (svip_ve_mps_xmit)
|
|
+ {
|
|
+ err = svip_ve_mps_xmit(skb);
|
|
+ if (err)
|
|
+ stats->tx_errors++;
|
|
+ dev->trans_start = jiffies;
|
|
+ return err;
|
|
+ }
|
|
+ else
|
|
+ printk(KERN_ERR "%s: MPS driver not registered, outgoing packet not delivered\n", dev->name);
|
|
+
|
|
+ dev_kfree_skb(skb);
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Called by MPS driver upon receipt of a new message from VoFW0 module in
|
|
+ * the data inbox. The packet is pushed up the IP module for further processing.
|
|
+ *
|
|
+ * \param skb pointer to skb containing the incoming message
|
|
+ *
|
|
+ * \return 0 on success
|
|
+ * \return non-zero on error
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int svip_ve_rx(struct sk_buff *skb)
|
|
+{
|
|
+ int err;
|
|
+ struct svip_ve_priv *priv = netdev_priv(svip_ve_dev);
|
|
+ struct net_device_stats *stats = &priv->stats;
|
|
+
|
|
+ skb->dev = svip_ve_dev;
|
|
+ skb->protocol = eth_type_trans(skb, svip_ve_dev);
|
|
+
|
|
+ stats->rx_packets++;
|
|
+ stats->rx_bytes += skb->len;
|
|
+
|
|
+ err = netif_rx(skb);
|
|
+ switch (err)
|
|
+ {
|
|
+ case NET_RX_SUCCESS:
|
|
+ return 0;
|
|
+ break;
|
|
+ case NET_RX_DROP:
|
|
+ default:
|
|
+ stats->rx_dropped++;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+EXPORT_SYMBOL(svip_ve_rx);
|
|
+
|
|
+/**
|
|
+ * Returns a pointer to the device's networking statistics data
|
|
+ *
|
|
+ * \param dev pointer to this networking device's data
|
|
+ *
|
|
+ * \return stats pointer to this network device's statistics data
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+static struct net_device_stats *svip_ve_get_stats(struct net_device *dev)
|
|
+{
|
|
+ struct svip_ve_priv *priv = netdev_priv(dev);
|
|
+
|
|
+ return &priv->stats;
|
|
+}
|
|
+
|
|
+static void svip_ve_tx_timeout(struct net_device *dev)
|
|
+{
|
|
+ struct svip_ve_priv *priv = netdev_priv(dev);
|
|
+
|
|
+ priv->stats.tx_errors++;
|
|
+ netif_wake_queue(dev);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Device open routine. Called e.g. upon setting of an IP address using,
|
|
+ * 'ifconfig veth0 YYY.YYY.YYY.YYY netmask ZZZ.ZZZ.ZZZ.ZZZ' or
|
|
+ * 'ifconfig veth0 up'
|
|
+ *
|
|
+ * \param dev pointer to this network device's data
|
|
+ *
|
|
+ * \return 0 on success
|
|
+ * \return non-zero on error
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int svip_ve_open(struct net_device *dev)
|
|
+{
|
|
+ netif_start_queue(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Device close routine. Called e.g. upon calling
|
|
+ * 'ifconfig veth0 down'
|
|
+ *
|
|
+ * \param dev pointer to this network device's data
|
|
+ *
|
|
+ * \return 0 on success
|
|
+ * \return non-zero on error
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+
|
|
+int svip_ve_release(struct net_device *dev)
|
|
+{
|
|
+ netif_stop_queue(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int svip_ve_dev_init(struct net_device *dev);
|
|
+
|
|
+static const struct net_device_ops svip_virtual_eth_netdev_ops = {
|
|
+ .ndo_init = svip_ve_dev_init,
|
|
+ .ndo_open = svip_ve_open,
|
|
+ .ndo_stop = svip_ve_release,
|
|
+ .ndo_start_xmit = svip_ve_xmit,
|
|
+ .ndo_get_stats = svip_ve_get_stats,
|
|
+ .ndo_tx_timeout = svip_ve_tx_timeout,
|
|
+};
|
|
+
|
|
+
|
|
+/**
|
|
+ * Device initialisation routine which registers device interface routines.
|
|
+ * It is called upon execution of 'register_netdev' routine.
|
|
+ *
|
|
+ * \param dev pointer to this network device's data
|
|
+ *
|
|
+ * \return 0 on success
|
|
+ * \return non-zero on error
|
|
+ *
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+static int svip_ve_dev_init(struct net_device *dev)
|
|
+{
|
|
+ ether_setup(dev); /* assign some of the fields */
|
|
+
|
|
+ dev->watchdog_timeo = watchdog_timeout;
|
|
+ memset(netdev_priv(dev), 0, sizeof(struct svip_ve_priv));
|
|
+ dev->flags |= IFF_NOARP|IFF_PROMISC;
|
|
+ dev->flags &= ~IFF_MULTICAST;
|
|
+
|
|
+ /* dedicated MAC address to veth0, 00:03:19:00:15:80 */
|
|
+ dev->dev_addr[0] = 0x00;
|
|
+ dev->dev_addr[1] = 0x03;
|
|
+ dev->dev_addr[2] = 0x19;
|
|
+ dev->dev_addr[3] = 0x00;
|
|
+ dev->dev_addr[4] = 0x15;
|
|
+ dev->dev_addr[5] = 0x80;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int svip_ve_probe(struct platform_device *dev)
|
|
+{
|
|
+ int result = 0;
|
|
+
|
|
+ svip_ve_dev = alloc_etherdev(sizeof(struct svip_ve_priv));
|
|
+ svip_ve_dev->netdev_ops = &svip_virtual_eth_netdev_ops;
|
|
+
|
|
+ strcpy(svip_ve_dev->name, "veth%d");
|
|
+
|
|
+ result = register_netdev(svip_ve_dev);
|
|
+ if (result)
|
|
+ {
|
|
+ printk(KERN_INFO "error %i registering device \"%s\"\n", result, svip_ve_dev->name);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ printk (KERN_INFO "%s, (c) 2009, Lantiq Deutschland GmbH\n", &SVIP_VETH_INFO_STR[4]);
|
|
+
|
|
+out:
|
|
+ return result;
|
|
+}
|
|
+
|
|
+static int svip_ve_remove(struct platform_device *dev)
|
|
+{
|
|
+ unregister_netdev(svip_ve_dev);
|
|
+ free_netdev(svip_ve_dev);
|
|
+
|
|
+ printk(KERN_INFO "%s removed\n", svip_ve_dev->name);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct platform_driver svip_ve_driver = {
|
|
+ .probe = svip_ve_probe,
|
|
+ .remove = svip_ve_remove,
|
|
+ .driver = {
|
|
+ .name = "ifxmips_svip_ve",
|
|
+ .owner = THIS_MODULE,
|
|
+ },
|
|
+};
|
|
+
|
|
+/**
|
|
+ * Module/driver entry routine
|
|
+ */
|
|
+static int __init svip_ve_init_module(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = platform_driver_register(&svip_ve_driver);
|
|
+ if (ret)
|
|
+ printk(KERN_INFO "SVIP: error(%d) registering virtual Ethernet driver!\n", ret);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Module exit routine (never called for statically linked driver)
|
|
+ */
|
|
+static void __exit svip_ve_cleanup_module(void)
|
|
+{
|
|
+ platform_driver_unregister(&svip_ve_driver);
|
|
+}
|
|
+
|
|
+module_init(svip_ve_init_module);
|
|
+module_exit(svip_ve_cleanup_module);
|
|
+MODULE_LICENSE("GPL");
|
|
+MODULE_DESCRIPTION("virtual ethernet driver for LANTIQ SVIP system");
|
|
+
|
|
+EXPORT_SYMBOL(register_mps_recv_routine);
|
|
Index: linux-3.3.8/arch/mips/kernel/cevt-r4k.c
|
|
===================================================================
|
|
--- linux-3.3.8.orig/arch/mips/kernel/cevt-r4k.c 2012-06-01 09:16:13.000000000 +0200
|
|
+++ linux-3.3.8/arch/mips/kernel/cevt-r4k.c 2012-07-31 19:51:34.141105918 +0200
|
|
@@ -171,8 +171,10 @@
|
|
if (!cpu_has_counter || !mips_hpt_frequency)
|
|
return -ENXIO;
|
|
|
|
+#ifndef CONFIG_SOC_SVIP
|
|
if (!c0_compare_int_usable())
|
|
return -ENXIO;
|
|
+#endif
|
|
|
|
/*
|
|
* With vectored interrupts things are getting platform specific.
|
|
Index: linux-3.3.8/arch/mips/lantiq/clk.c
|
|
===================================================================
|
|
--- linux-3.3.8.orig/arch/mips/lantiq/clk.c 2012-07-31 19:51:33.457105889 +0200
|
|
+++ linux-3.3.8/arch/mips/lantiq/clk.c 2012-07-31 19:51:34.141105918 +0200
|
|
@@ -149,7 +149,13 @@
|
|
|
|
clk = clk_get_cpu();
|
|
mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution();
|
|
+#ifdef CONFIG_SOC_SVIP
|
|
+ write_c0_count(0);
|
|
+ write_c0_compare(mips_hpt_frequency / HZ);
|
|
+ enable_irq(MIPS_CPU_TIMER_IRQ);
|
|
+#else
|
|
write_c0_compare(read_c0_count());
|
|
+#endif
|
|
pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
|
|
clk_put(clk);
|
|
}
|
|
Index: linux-3.3.8/arch/mips/lantiq/irq.c
|
|
===================================================================
|
|
--- linux-3.3.8.orig/arch/mips/lantiq/irq.c 2012-07-31 19:51:33.897105907 +0200
|
|
+++ linux-3.3.8/arch/mips/lantiq/irq.c 2012-07-31 19:51:34.141105918 +0200
|
|
@@ -17,6 +17,10 @@
|
|
|
|
#include <lantiq_soc.h>
|
|
#include <irq.h>
|
|
+#ifdef CONFIG_SOC_SVIP
|
|
+#include <ebu_reg.h>
|
|
+#include <base_reg.h>
|
|
+#endif
|
|
|
|
/* register definitions */
|
|
#define LTQ_ICU_IM0_ISR 0x0000
|
|
@@ -175,7 +179,6 @@
|
|
int irq_nr = d->irq - INT_NUM_IRQ0;
|
|
unsigned int im_nr;
|
|
|
|
- irq_nr -= INT_NUM_IRQ0;
|
|
im_nr = (irq_nr / INT_NUM_IM_OFFSET);
|
|
irq_nr %= INT_NUM_IM_OFFSET;
|
|
|
|
@@ -188,7 +191,6 @@
|
|
int irq_nr = d->irq - INT_NUM_IRQ0;
|
|
unsigned int im_nr;
|
|
|
|
- irq_nr -= INT_NUM_IRQ0;
|
|
im_nr = (irq_nr / INT_NUM_IM_OFFSET);
|
|
irq_nr %= INT_NUM_IM_OFFSET;
|
|
|
|
@@ -200,7 +202,6 @@
|
|
int irq_nr = d->irq - INT_NUM_IRQ0;
|
|
unsigned int im_nr;
|
|
|
|
- irq_nr -= INT_NUM_IRQ0;
|
|
im_nr = (irq_nr / INT_NUM_IM_OFFSET);
|
|
irq_nr %= INT_NUM_IM_OFFSET;
|
|
|
|
@@ -281,10 +282,12 @@
|
|
irq = __fls(irq);
|
|
do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
|
|
|
|
+#ifndef CONFIG_SOC_SVIP
|
|
/* if this is a EBU irq, we need to ack it or get a deadlock */
|
|
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
|
|
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
|
|
LTQ_EBU_PCC_ISTAT);
|
|
+#endif
|
|
}
|
|
|
|
#define DEFINE_HWx_IRQDISPATCH(x) \
|
|
@@ -298,10 +301,14 @@
|
|
DEFINE_HWx_IRQDISPATCH(3)
|
|
DEFINE_HWx_IRQDISPATCH(4)
|
|
|
|
+#if MIPS_CPU_TIMER_IRQ == 7
|
|
static void ltq_hw5_irqdispatch(void)
|
|
{
|
|
do_IRQ(MIPS_CPU_TIMER_IRQ);
|
|
}
|
|
+#else
|
|
+DEFINE_HWx_IRQDISPATCH(5)
|
|
+#endif
|
|
|
|
#ifdef CONFIG_MIPS_MT_SMP
|
|
void __init arch_init_ipiirq(int irq, struct irqaction *action)
|
|
@@ -349,11 +356,11 @@
|
|
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
|
|
unsigned int i;
|
|
|
|
- if (pending & CAUSEF_IP7) {
|
|
+ if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
|
|
do_IRQ(MIPS_CPU_TIMER_IRQ);
|
|
goto out;
|
|
} else {
|
|
- for (i = 0; i < 5; i++) {
|
|
+ for (i = 0; i < IM_NUM; i++) {
|
|
if (pending & (CAUSEF_IP2 << i)) {
|
|
ltq_hw_irqdispatch(i);
|
|
goto out;
|
|
@@ -389,15 +396,6 @@
|
|
panic("Failed to remap icu memory\n");
|
|
}
|
|
|
|
- if (request_mem_region(ltq_icu_resource.start,
|
|
- resource_size(<q_icu_resource), "icu") < 0)
|
|
- panic("Failed to request icu memory");
|
|
-
|
|
- ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
|
|
- resource_size(<q_icu_resource));
|
|
- if (!ltq_icu_membase)
|
|
- panic("Failed to remap icu memory");
|
|
-
|
|
if (LTQ_EIU_BASE_ADDR) {
|
|
if (insert_resource(&iomem_resource, <q_eiu_resource) < 0)
|
|
panic("Failed to insert eiu memory\n");
|
|
@@ -413,7 +411,7 @@
|
|
}
|
|
|
|
/* make sure all irqs are turned off by default */
|
|
- for (i = 0; i < IM_NUM; i++)
|
|
+ for (i = 0; i < IM_NUM; i++) {
|
|
ltq_icu_w32(0, LTQ_ICU_IM0_IER, i);
|
|
/* clear all possibly pending interrupts */
|
|
ltq_icu_w32(~0, LTQ_ICU_IM0_ISR, i);
|
|
@@ -421,8 +419,8 @@
|
|
|
|
mips_cpu_irq_init();
|
|
|
|
- for (i = 2; i <= 6; i++)
|
|
- setup_irq(i, &cascade);
|
|
+ for (i = 0; i < IM_NUM; i++)
|
|
+ setup_irq(i + 2, &cascade);
|
|
|
|
if (cpu_has_vint) {
|
|
pr_info("Setting up vectored interrupts\n");
|
|
@@ -435,7 +433,7 @@
|
|
}
|
|
|
|
for (i = INT_NUM_IRQ0;
|
|
- i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
|
|
+ i <= (INT_NUM_IRQ0 + (IM_NUM * INT_NUM_IM_OFFSET)); i++)
|
|
if (((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
|
|
(i == LTQ_EIU_IR2)) && LTQ_EIU_BASE_ADDR)
|
|
irq_set_chip_and_handler(i, <q_eiu_type,
|
|
@@ -473,5 +471,9 @@
|
|
|
|
unsigned int __cpuinit get_c0_compare_int(void)
|
|
{
|
|
+#ifdef CONFIG_SOC_SVIP
|
|
+ return MIPS_CPU_TIMER_IRQ;
|
|
+#else
|
|
return CP0_LEGACY_COMPARE_IRQ;
|
|
+#endif
|
|
}
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/devices.h
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/devices.h 2012-07-31 19:51:34.141105918 +0200
|
|
@@ -0,0 +1,23 @@
|
|
+#ifndef _SVIP_DEVICES_H__
|
|
+#define _SVIP_DEVICES_H__
|
|
+
|
|
+#include <linux/mtd/physmap.h>
|
|
+#include <linux/spi/spi.h>
|
|
+#include <linux/spi/flash.h>
|
|
+#include <svip_mux.h>
|
|
+#include "../devices.h"
|
|
+
|
|
+extern void __init svip_register_asc(int port);
|
|
+extern void __init svip_register_eth(void);
|
|
+extern void __init svip_register_virtual_eth(void);
|
|
+extern void __init svip_register_spi(void);
|
|
+extern void __init svip_register_spi_flash(struct spi_board_info *bdinfo);
|
|
+extern void __init svip_register_gpio(void);
|
|
+extern void __init svip_register_mux(const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS],
|
|
+ const struct ltq_mux_pin mux_p1[LTQ_MUX_P1_PINS],
|
|
+ const struct ltq_mux_pin mux_p2[LTQ_MUX_P2_PINS],
|
|
+ const struct ltq_mux_pin mux_p3[LTQ_MUX_P3_PINS],
|
|
+ const struct ltq_mux_pin mux_p4[LTQ_MUX_P4_PINS]);
|
|
+extern void __init svip_register_nand(void);
|
|
+
|
|
+#endif
|
|
Index: linux-3.3.8/arch/mips/mm/c-r4k.c
|
|
===================================================================
|
|
--- linux-3.3.8.orig/arch/mips/mm/c-r4k.c 2012-07-31 19:51:33.433105887 +0200
|
|
+++ linux-3.3.8/arch/mips/mm/c-r4k.c 2012-07-31 19:51:34.145105918 +0200
|
|
@@ -1252,6 +1252,9 @@
|
|
way_string[c->scache.ways], c->scache.linesz);
|
|
}
|
|
#else
|
|
+#ifdef CONFIG_SOC_SVIP
|
|
+ return;
|
|
+#endif
|
|
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
|
|
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
|
|
#endif
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/dma.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/dma.c 2012-07-31 20:45:23.245243957 +0200
|
|
@@ -0,0 +1,1206 @@
|
|
+/*
|
|
+ ** Copyright (C) 2005 Wu Qi Ming <Qi-Ming.Wu@infineon.com>
|
|
+ **
|
|
+ ** This program is free software; you can redistribute it and/or modify
|
|
+ ** it under the terms of the GNU General Public License as published by
|
|
+ ** the Free Software Foundation; either version 2 of the License, or
|
|
+ ** (at your option) any later version.
|
|
+ **
|
|
+ ** This program is distributed in the hope that it will be useful,
|
|
+ ** but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ ** GNU General Public License for more details.
|
|
+ **
|
|
+ ** You should have received a copy of the GNU General Public License
|
|
+ ** along with this program; if not, write to the Free Software
|
|
+ ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
+ */
|
|
+/*
|
|
+ * Description:
|
|
+ * Driver for SVIP DMA
|
|
+ * Author: Wu Qi Ming[Qi-Ming.Wu@infineon.com]
|
|
+ * Created: 26-September-2005
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/timer.h>
|
|
+#include <linux/fs.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <linux/stat.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/tty.h>
|
|
+#include <linux/selection.h>
|
|
+#include <linux/kmod.h>
|
|
+#include <linux/vmalloc.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/semaphore.h>
|
|
+
|
|
+#include <base_reg.h>
|
|
+#include <mps_reg.h>
|
|
+#include <dma_reg.h>
|
|
+#include <svip_dma.h>
|
|
+#include <lantiq_soc.h>
|
|
+#include <irq.h>
|
|
+#include <sys1_reg.h>
|
|
+
|
|
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
|
|
+static struct svip_reg_dma *const dma = (struct svip_reg_dma *)LTQ_DMA_BASE;
|
|
+static struct svip_reg_mbs *const mbs = (struct svip_reg_mbs *)LTQ_MBS_BASE;
|
|
+
|
|
+#define DRV_NAME "ltq_dma"
|
|
+extern void ltq_mask_and_ack_irq(struct irq_data *data);
|
|
+extern void ltq_enable_irq(struct irq_data *data);
|
|
+
|
|
+static inline void mask_and_ack_irq(unsigned int irq_nr)
|
|
+{
|
|
+ static int i = 0;
|
|
+ struct irq_data data;
|
|
+ data.irq = irq_nr;
|
|
+ if ((i < 2) && (irq_nr == 137)) {
|
|
+ printk("eth delay hack\n");
|
|
+ i++;
|
|
+ }
|
|
+ ltq_mask_and_ack_irq(&data);
|
|
+}
|
|
+
|
|
+static inline void svip_enable_irq(unsigned int irq_nr)
|
|
+{
|
|
+ struct irq_data data;
|
|
+ data.irq = irq_nr;
|
|
+ ltq_enable_irq(&data);
|
|
+}
|
|
+
|
|
+#define DMA_EMSG(fmt, args...) \
|
|
+ printk(KERN_ERR "%s: " fmt, __func__, ## args)
|
|
+
|
|
+static inline void mbs_grab(void)
|
|
+{
|
|
+ while (mbs_r32(mbsr0) != 0);
|
|
+}
|
|
+
|
|
+static inline void mbs_release(void)
|
|
+{
|
|
+ mbs_w32(0, mbsr0);
|
|
+ asm("sync");
|
|
+}
|
|
+
|
|
+/* max ports connecting to dma */
|
|
+#define LTQ_MAX_DMA_DEVICE_NUM ARRAY_SIZE(dma_devices)
|
|
+/* max dma channels */
|
|
+#define LTQ_MAX_DMA_CHANNEL_NUM ARRAY_SIZE(dma_chan)
|
|
+
|
|
+/* bytes per descriptor */
|
|
+#define DMA_DESCR_SIZE 8
|
|
+
|
|
+#define DMA_DESCR_CH_SIZE (DMA_DESCR_NUM * DMA_DESCR_SIZE)
|
|
+#define DMA_DESCR_TOTAL_SIZE (LTQ_MAX_DMA_CHANNEL_NUM * DMA_DESCR_CH_SIZE)
|
|
+#define DMA_DESCR_MEM_PAGES ((DMA_DESCR_TOTAL_SIZE / PAGE_SIZE) + \
|
|
+ (((DMA_DESCR_TOTAL_SIZE % PAGE_SIZE) > 0)))
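+/*
+ * Sizing sketch (illustrative only): DMA_DESCR_NUM comes from svip_dma.h and
+ * is not fixed here. With ARRAY_SIZE(dma_chan) == 24 channels, 8 bytes per
+ * descriptor, and assuming e.g. DMA_DESCR_NUM == 64 and PAGE_SIZE == 4096:
+ *
+ *   DMA_DESCR_CH_SIZE    = 64 * 8       =   512 bytes per channel
+ *   DMA_DESCR_TOTAL_SIZE = 24 * 512     = 12288 bytes
+ *   DMA_DESCR_MEM_PAGES  = 12288 / 4096 =     3 pages (no remainder to round up)
+ */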
|
|
+
|
|
+/* budget for interrupt handling */
|
|
+#define DMA_INT_BUDGET 100
|
|
+/* set the correct counter value here! */
|
|
+#define DMA_POLL_COUNTER 32
|
|
+
|
|
+struct proc_dir_entry *g_dma_dir;
|
|
+
|
|
+/* device_name | max_rx_chan_num | max_tx_chan_num | drop_enable */
|
|
+struct dma_device_info dma_devices[] = {
|
|
+ { "SW", 4, 4, 0 },
|
|
+ { "DEU", 1, 1, 0 },
|
|
+ { "SSC0", 1, 1, 0 },
|
|
+ { "SSC1", 1, 1, 0 },
|
|
+ { "MCTRL", 1, 1, 0 },
|
|
+ { "PCM0", 1, 1, 0 },
|
|
+ { "PCM1", 1, 1, 0 },
|
|
+ { "PCM2", 1, 1, 0 },
|
|
+ { "PCM3", 1, 1, 0 }
|
|
+};
|
|
+
|
|
+/* *dma_dev | dir | pri | irq | rel_chan_no */
|
|
+struct dma_channel_info dma_chan[] = {
|
|
+ { &dma_devices[0], DIR_RX, 0, INT_NUM_IM4_IRL0 + 0, 0 },
|
|
+ { &dma_devices[0], DIR_TX, 0, INT_NUM_IM4_IRL0 + 1, 0 },
|
|
+ { &dma_devices[0], DIR_RX, 1, INT_NUM_IM4_IRL0 + 2, 1 },
|
|
+ { &dma_devices[0], DIR_TX, 1, INT_NUM_IM4_IRL0 + 3, 1 },
|
|
+ { &dma_devices[0], DIR_RX, 2, INT_NUM_IM4_IRL0 + 4, 2 },
|
|
+ { &dma_devices[0], DIR_TX, 2, INT_NUM_IM4_IRL0 + 5, 2 },
|
|
+ { &dma_devices[0], DIR_RX, 3, INT_NUM_IM4_IRL0 + 6, 3 },
|
|
+ { &dma_devices[0], DIR_TX, 3, INT_NUM_IM4_IRL0 + 7, 3 },
|
|
+ { &dma_devices[1], DIR_RX, 0, INT_NUM_IM4_IRL0 + 8, 0 },
|
|
+ { &dma_devices[1], DIR_TX, 0, INT_NUM_IM4_IRL0 + 9, 0 },
|
|
+ { &dma_devices[2], DIR_RX, 0, INT_NUM_IM4_IRL0 + 10, 0 },
|
|
+ { &dma_devices[2], DIR_TX, 0, INT_NUM_IM4_IRL0 + 11, 0 },
|
|
+ { &dma_devices[3], DIR_RX, 0, INT_NUM_IM4_IRL0 + 12, 0 },
|
|
+ { &dma_devices[3], DIR_TX, 0, INT_NUM_IM4_IRL0 + 13, 0 },
|
|
+ { &dma_devices[4], DIR_RX, 0, INT_NUM_IM4_IRL0 + 14, 0 },
|
|
+ { &dma_devices[4], DIR_TX, 0, INT_NUM_IM4_IRL0 + 15, 0 },
|
|
+ { &dma_devices[5], DIR_RX, 0, INT_NUM_IM4_IRL0 + 16, 0 },
|
|
+ { &dma_devices[5], DIR_TX, 0, INT_NUM_IM4_IRL0 + 17, 0 },
|
|
+ { &dma_devices[6], DIR_RX, 1, INT_NUM_IM3_IRL0 + 18, 0 },
|
|
+ { &dma_devices[6], DIR_TX, 1, INT_NUM_IM3_IRL0 + 19, 0 },
|
|
+ { &dma_devices[7], DIR_RX, 2, INT_NUM_IM4_IRL0 + 20, 0 },
|
|
+ { &dma_devices[7], DIR_TX, 2, INT_NUM_IM4_IRL0 + 21, 0 },
|
|
+ { &dma_devices[8], DIR_RX, 3, INT_NUM_IM4_IRL0 + 22, 0 },
|
|
+ { &dma_devices[8], DIR_TX, 3, INT_NUM_IM4_IRL0 + 23, 0 }
|
|
+};
|
|
+
|
|
+u64 *g_desc_list[DMA_DESCR_MEM_PAGES];
|
|
+
|
|
+volatile u32 g_dma_int_status = 0;
|
|
+
|
|
+/* 0 - not in process, 1 - in process */
|
|
+volatile int g_dma_in_process;
|
|
+
|
|
+int ltq_dma_init(void);
|
|
+void do_dma_tasklet(unsigned long);
|
|
+DECLARE_TASKLET(dma_tasklet, do_dma_tasklet, 0);
|
|
+irqreturn_t dma_interrupt(int irq, void *dev_id);
|
|
+
|
|
+u8 *common_buffer_alloc(int len, int *byte_offset, void **opt)
|
|
+{
|
|
+ u8 *buffer = kmalloc(len * sizeof(u8), GFP_KERNEL);
|
|
+ *byte_offset = 0;
|
|
+ return buffer;
|
|
+}
|
|
+
|
|
+void common_buffer_free(u8 *dataptr, void *opt)
|
|
+{
|
|
+ kfree(dataptr);
|
|
+}
|
|
+
|
|
+void enable_ch_irq(struct dma_channel_info *ch)
|
|
+{
|
|
+ int chan_no = (int)(ch - dma_chan);
|
|
+ unsigned long flag;
|
|
+ u32 val;
|
|
+
|
|
+ if (ch->dir == DIR_RX)
|
|
+ val = DMA_CIE_DESCPT | DMA_CIE_DUR;
|
|
+ else
|
|
+ val = DMA_CIE_DESCPT;
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ dma_w32(val, cie);
|
|
+ dma_w32_mask(0, 1 << chan_no, irnen);
|
|
+ mbs_release();
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ svip_enable_irq(ch->irq);
|
|
+}
|
|
+
|
|
+void disable_ch_irq(struct dma_channel_info *ch)
|
|
+{
|
|
+ unsigned long flag;
|
|
+ int chan_no = (int)(ch - dma_chan);
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ g_dma_int_status &= ~(1 << chan_no);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ dma_w32(0, cie);
|
|
+ mbs_release();
|
|
+ dma_w32_mask(1 << chan_no, 0, irnen);
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ mask_and_ack_irq(ch->irq);
|
|
+}
|
|
+
|
|
+int open_chan(struct dma_channel_info *ch)
|
|
+{
|
|
+ unsigned long flag;
|
|
+ int j;
|
|
+ int chan_no = (int)(ch - dma_chan);
|
|
+ u8 *buffer;
|
|
+ int byte_offset;
|
|
+ struct rx_desc *rx_desc_p;
|
|
+ struct tx_desc *tx_desc_p;
|
|
+
|
|
+ if (ch->control == LTQ_DMA_CH_ON)
|
|
+ return -1;
|
|
+
|
|
+ if (ch->dir == DIR_RX) {
|
|
+ for (j = 0; j < ch->desc_len; j++) {
|
|
+ rx_desc_p = (struct rx_desc *)ch->desc_base+j;
|
|
+ buffer = ch->dma_dev->buffer_alloc(ch->packet_size,
|
|
+ &byte_offset,
|
|
+ (void *)&ch->opt[j]);
|
|
+ if (!buffer)
|
|
+ return -ENOBUFS;
|
|
+
|
|
+ rx_desc_p->data_pointer = (u32)CPHYSADDR((u32)buffer);
|
|
+ rx_desc_p->status.word = 0;
|
|
+ rx_desc_p->status.field.byte_offset = byte_offset;
|
|
+ rx_desc_p->status.field.data_length = ch->packet_size;
|
|
+ rx_desc_p->status.field.own = DMA_OWN;
|
|
+ }
|
|
+ } else {
|
|
+ for (j = 0; j < ch->desc_len; j++) {
|
|
+ tx_desc_p = (struct tx_desc *)ch->desc_base + j;
|
|
+ tx_desc_p->data_pointer = 0;
|
|
+ tx_desc_p->status.word = 0;
|
|
+ }
|
|
+ }
|
|
+ ch->xfer_cnt = 0;
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ dma_w32(ch->desc_len, cdlen);
|
|
+ dma_w32(0x7e, cis);
|
|
+ dma_w32(DMA_CCTRL_TXWGT_VAL(ch->tx_weight)
|
|
+ | DMA_CCTRL_CLASS_VAL(ch->pri)
|
|
+ | (ch->dir == DIR_RX ? DMA_CCTRL_ON_OFF : 0), cctrl);
|
|
+ mbs_release();
|
|
+ ch->control = LTQ_DMA_CH_ON;
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ if (request_irq(ch->irq, dma_interrupt,
|
|
+ IRQF_DISABLED, "dma-core", (void *)ch) != 0) {
|
|
+ printk(KERN_ERR "error, cannot get dma_irq!\n");
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ enable_ch_irq(ch);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int close_chan(struct dma_channel_info *ch)
|
|
+{
|
|
+ unsigned long flag;
|
|
+ int j;
|
|
+ int chan_no = (int)(ch - dma_chan);
|
|
+ struct rx_desc *desc_p;
|
|
+
|
|
+ if (ch->control == LTQ_DMA_CH_OFF)
|
|
+ return -1;
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ dma_w32_mask(DMA_CCTRL_ON_OFF, 0, cctrl);
|
|
+ mbs_release();
|
|
+ disable_ch_irq(ch);
|
|
+ free_irq(ch->irq, (void *)ch);
|
|
+ ch->control = LTQ_DMA_CH_OFF;
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ /* free descriptors in use */
|
|
+ for (j = 0; j < ch->desc_len; j++) {
|
|
+ desc_p = (struct rx_desc *)ch->desc_base+j;
|
|
+ if ((desc_p->status.field.own == CPU_OWN &&
|
|
+ desc_p->status.field.c) ||
|
|
+ (desc_p->status.field.own == DMA_OWN)) {
|
|
+ if (desc_p->data_pointer) {
|
|
+ ch->dma_dev->buffer_free((u8 *)__va(desc_p->data_pointer),
|
|
+ (void *)ch->opt[j]);
|
|
+ desc_p->data_pointer = (u32)NULL;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int reset_chan(struct dma_channel_info *ch)
|
|
+{
|
|
+ unsigned long flag;
|
|
+ int val;
|
|
+ int chan_no = (int)(ch - dma_chan);
|
|
+
|
|
+ close_chan(ch);
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ dma_w32_mask(0, DMA_CCTRL_RST, cctrl);
|
|
+ mbs_release();
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ do {
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ val = dma_r32(cctrl);
|
|
+ mbs_release();
|
|
+ local_irq_restore(flag);
|
|
+ } while (val & DMA_CCTRL_RST);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline void rx_chan_intr_handler(int chan_no)
|
|
+{
|
|
+ struct dma_device_info *dma_dev = (struct dma_device_info *)
|
|
+ dma_chan[chan_no].dma_dev;
|
|
+ struct dma_channel_info *ch = &dma_chan[chan_no];
|
|
+ struct rx_desc *rx_desc_p;
|
|
+ unsigned long flag;
|
|
+ u32 val;
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ val = dma_r32(cis);
|
|
+ dma_w32(DMA_CIS_DESCPT, cis);
|
|
+ mbs_release();
|
|
+
|
|
+ /* handle command complete interrupt */
|
|
+ rx_desc_p = (struct rx_desc *)ch->desc_base + ch->curr_desc;
|
|
+ if ((rx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) ==
|
|
+ DMA_DESC_CPT_SET) {
|
|
+ local_irq_restore(flag);
|
|
+		/* Everything is correct, so we inform the upper layer */
|
|
+ dma_dev->current_rx_chan = ch->rel_chan_no;
|
|
+ if (dma_dev->intr_handler)
|
|
+ dma_dev->intr_handler(dma_dev, RCV_INT);
|
|
+ ch->weight--;
|
|
+ } else {
|
|
+ g_dma_int_status &= ~(1 << chan_no);
|
|
+ local_irq_restore(flag);
|
|
+ svip_enable_irq(dma_chan[chan_no].irq);
|
|
+ }
|
|
+}
|
|
+
|
|
+static inline void tx_chan_intr_handler(int chan_no)
|
|
+{
|
|
+ struct dma_device_info *dma_dev = (struct dma_device_info *)
|
|
+ dma_chan[chan_no].dma_dev;
|
|
+ struct dma_channel_info *ch = &dma_chan[chan_no];
|
|
+ struct tx_desc *tx_desc_p;
|
|
+ unsigned long flag;
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ dma_w32(DMA_CIS_DESCPT, cis);
|
|
+ mbs_release();
|
|
+
|
|
+ tx_desc_p = (struct tx_desc *)ch->desc_base+ch->prev_desc;
|
|
+ if ((tx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) ==
|
|
+ DMA_DESC_CPT_SET) {
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ dma_dev->buffer_free((u8 *)__va(tx_desc_p->data_pointer),
|
|
+ ch->opt[ch->prev_desc]);
|
|
+ memset(tx_desc_p, 0, sizeof(struct tx_desc));
|
|
+ dma_dev->current_tx_chan = ch->rel_chan_no;
|
|
+ if (dma_dev->intr_handler)
|
|
+ dma_dev->intr_handler(dma_dev, TRANSMIT_CPT_INT);
|
|
+ ch->weight--;
|
|
+
|
|
+ ch->prev_desc = (ch->prev_desc + 1) % (ch->desc_len);
|
|
+ } else {
|
|
+ g_dma_int_status &= ~(1 << chan_no);
|
|
+ local_irq_restore(flag);
|
|
+ svip_enable_irq(dma_chan[chan_no].irq);
|
|
+ }
|
|
+}
|
|
+
|
|
+void do_dma_tasklet(unsigned long unused)
|
|
+{
|
|
+ int i;
|
|
+ int chan_no = 0;
|
|
+ int budget = DMA_INT_BUDGET;
|
|
+ int weight = 0;
|
|
+ unsigned long flag;
|
|
+
|
|
+ while (g_dma_int_status) {
|
|
+ if (budget-- < 0) {
|
|
+ tasklet_schedule(&dma_tasklet);
|
|
+ return;
|
|
+ }
|
|
+ chan_no = -1;
|
|
+ weight = 0;
|
|
+ /* WFQ algorithm to select the channel */
|
|
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
|
|
+ if (g_dma_int_status & (1 << i) &&
|
|
+ dma_chan[i].weight > 0) {
|
|
+ if (dma_chan[i].weight > weight) {
|
|
+ chan_no = i;
|
|
+ weight = dma_chan[chan_no].weight;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (chan_no >= 0) {
|
|
+ if (dma_chan[chan_no].dir == DIR_RX)
|
|
+ rx_chan_intr_handler(chan_no);
|
|
+ else
|
|
+ tx_chan_intr_handler(chan_no);
|
|
+ } else {
|
|
+ /* reset all the channels */
|
|
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
|
|
+ dma_chan[i].weight = dma_chan[i].default_weight;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ g_dma_in_process = 0;
|
|
+ if (g_dma_int_status) {
|
|
+ g_dma_in_process = 1;
|
|
+ tasklet_schedule(&dma_tasklet);
|
|
+ }
|
|
+ local_irq_restore(flag);
|
|
+}
|
|
+
|
|
+irqreturn_t dma_interrupt(int irq, void *dev_id)
|
|
+{
|
|
+ struct dma_channel_info *ch;
|
|
+ int chan_no = 0;
|
|
+
|
|
+ ch = (struct dma_channel_info *)dev_id;
|
|
+ chan_no = (int)(ch - dma_chan);
|
|
+
|
|
+ if ((unsigned)chan_no >= LTQ_MAX_DMA_CHANNEL_NUM) {
|
|
+ printk(KERN_ERR "error: dma_interrupt irq=%d chan_no=%d\n",
|
|
+ irq, chan_no);
|
|
+ }
|
|
+
|
|
+ g_dma_int_status |= 1 << chan_no;
|
|
+ dma_w32(1 << chan_no, irncr);
|
|
+ mask_and_ack_irq(irq);
|
|
+
|
|
+ if (!g_dma_in_process) {
|
|
+ g_dma_in_process = 1;
|
|
+ tasklet_schedule(&dma_tasklet);
|
|
+ }
|
|
+
|
|
+ return IRQ_RETVAL(1);
|
|
+}
|
|
+
|
|
+struct dma_device_info *dma_device_reserve(char *dev_name)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ ltq_dma_init();
|
|
+ for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
|
|
+ if (strcmp(dev_name, dma_devices[i].device_name) == 0) {
|
|
+ if (dma_devices[i].reserved)
|
|
+ return NULL;
|
|
+ dma_devices[i].reserved = 1;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (i == LTQ_MAX_DMA_DEVICE_NUM)
|
|
+ return NULL;
|
|
+
|
|
+ return &dma_devices[i];
|
|
+}
|
|
+EXPORT_SYMBOL(dma_device_reserve);
|
|
+
|
|
+int dma_device_release(struct dma_device_info *dma_dev)
|
|
+{
|
|
+ dma_dev->reserved = 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(dma_device_release);
|
|
+
|
|
+int dma_device_register(struct dma_device_info *dma_dev)
|
|
+{
|
|
+ int port_no = (int)(dma_dev - dma_devices);
|
|
+ int txbl, rxbl;
|
|
+ unsigned long flag;
|
|
+
|
|
+ switch (dma_dev->tx_burst_len) {
|
|
+ case 8:
|
|
+ txbl = 3;
|
|
+ break;
|
|
+ case 4:
|
|
+ txbl = 2;
|
|
+ break;
|
|
+ default:
|
|
+ txbl = 1;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ switch (dma_dev->rx_burst_len) {
|
|
+ case 8:
|
|
+ rxbl = 3;
|
|
+ break;
|
|
+ case 4:
|
|
+ rxbl = 2;
|
|
+ break;
|
|
+ default:
|
|
+ rxbl = 1;
|
|
+ }
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(port_no, ps);
|
|
+ dma_w32(DMA_PCTRL_TXWGT_VAL(dma_dev->tx_weight)
|
|
+ | DMA_PCTRL_TXENDI_VAL(dma_dev->tx_endianness_mode)
|
|
+ | DMA_PCTRL_RXENDI_VAL(dma_dev->rx_endianness_mode)
|
|
+ | DMA_PCTRL_PDEN_VAL(dma_dev->drop_enable)
|
|
+ | DMA_PCTRL_TXBL_VAL(txbl)
|
|
+ | DMA_PCTRL_RXBL_VAL(rxbl), pctrl);
|
|
+ mbs_release();
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(dma_device_register);
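+/*
+ * Usage sketch (illustrative only; mirrors how the svip_eth driver in this
+ * patch uses the API - the callback names below are placeholders):
+ *
+ *	struct dma_device_info *dma_dev = dma_device_reserve("SW");
+ *	if (!dma_dev)
+ *		return -ENODEV;
+ *	dma_dev->buffer_alloc = my_buffer_alloc;
+ *	dma_dev->buffer_free  = my_buffer_free;
+ *	dma_dev->intr_handler = my_intr_handler;
+ *	dma_dev->rx_chan[0]->packet_size = 0x600;
+ *	dma_device_register(dma_dev);
+ *	dma_dev->rx_chan[0]->open(dma_dev->rx_chan[0]);
+ *
+ * dma_device_release() undoes the reservation once the channels are closed.
+ */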
|
|
+
|
|
+int dma_device_unregister(struct dma_device_info *dma_dev)
|
|
+{
|
|
+ int i;
|
|
+ int port_no = (int)(dma_dev - dma_devices);
|
|
+ unsigned long flag;
|
|
+
|
|
+ /* flush memcopy module; has no effect for other ports */
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(port_no, ps);
|
|
+ dma_w32_mask(0, DMA_PCTRL_GPC, pctrl);
|
|
+ mbs_release();
|
|
+ local_irq_restore(flag);
|
|
+
|
|
+ for (i = 0; i < dma_dev->max_tx_chan_num; i++)
|
|
+ reset_chan(dma_dev->tx_chan[i]);
|
|
+
|
|
+ for (i = 0; i < dma_dev->max_rx_chan_num; i++)
|
|
+ reset_chan(dma_dev->rx_chan[i]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(dma_device_unregister);
|
|
+
|
|
+/**
|
|
+ * Read Packet from DMA Rx channel.
|
|
+ * The function gets the data from the current rx descriptor assigned
|
|
+ * to the passed DMA device and passes it back to the caller.
|
|
+ * The function is called in the context of DMA interrupt.
|
|
+ * In detail the following actions are done:
|
|
+ * - get current receive descriptor
|
|
+ * - allocate memory via allocation callback function
|
|
+ * - pass data from descriptor to allocated memory
|
|
+ * - update channel weight
|
|
+ * - release descriptor
|
|
+ * - update current descriptor position
|
|
+ *
|
|
+ * \param *dma_dev - pointer to DMA device structure
|
|
+ * \param **dataptr - pointer to received data
|
|
+ * \param **opt
|
|
+ * \return packet length - length of received data
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int dma_device_read(struct dma_device_info *dma_dev, u8 **dataptr, void **opt)
|
|
+{
|
|
+ u8 *buf;
|
|
+ int len;
|
|
+ int byte_offset = 0;
|
|
+ void *p = NULL;
|
|
+
|
|
+ struct dma_channel_info *ch =
|
|
+ dma_dev->rx_chan[dma_dev->current_rx_chan];
|
|
+
|
|
+ struct rx_desc *rx_desc_p;
|
|
+
|
|
+ /* get the rx data first */
|
|
+ rx_desc_p = (struct rx_desc *)ch->desc_base+ch->curr_desc;
|
|
+ buf = (u8 *)__va(rx_desc_p->data_pointer);
|
|
+ *(u32 *)dataptr = (u32)buf;
|
|
+ len = rx_desc_p->status.field.data_length;
|
|
+#ifndef CONFIG_MIPS_UNCACHED
|
|
+ dma_cache_inv((unsigned long)buf, len);
|
|
+#endif
|
|
+ if (opt)
|
|
+ *(int *)opt = (int)ch->opt[ch->curr_desc];
|
|
+
|
|
+ /* replace with a new allocated buffer */
|
|
+ buf = dma_dev->buffer_alloc(ch->packet_size, &byte_offset, &p);
|
|
+ if (buf) {
|
|
+ ch->opt[ch->curr_desc] = p;
|
|
+
|
|
+ wmb();
|
|
+ rx_desc_p->data_pointer = (u32)CPHYSADDR((u32)buf);
|
|
+ rx_desc_p->status.word = (DMA_OWN << 31) \
|
|
+ |(byte_offset << 23) \
|
|
+ | ch->packet_size;
|
|
+
|
|
+ wmb();
|
|
+ } else {
|
|
+ *(u32 *)dataptr = 0;
|
|
+ if (opt)
|
|
+ *(int *)opt = 0;
|
|
+ }
|
|
+
|
|
+ ch->xfer_cnt++;
|
|
+ /* increase the curr_desc pointer */
|
|
+ ch->curr_desc++;
|
|
+ if (ch->curr_desc == ch->desc_len)
|
|
+ ch->curr_desc = 0;
|
|
+ /* return the length of the received packet */
|
|
+ return len;
|
|
+}
|
|
+EXPORT_SYMBOL(dma_device_read);
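+/*
+ * Usage sketch (illustrative only): a client's intr_handler callback would
+ * typically call this on RCV_INT, e.g.
+ *
+ *	u8 *buf;
+ *	void *opt;
+ *	int len = dma_device_read(dma_dev, &buf, &opt);
+ *
+ * On return, buf points to the buffer previously handed out by the client's
+ * buffer_alloc callback and the descriptor has been re-armed with a freshly
+ * allocated replacement buffer; if that allocation fails, NULL is returned
+ * through dataptr instead.
+ */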
|
|
+
|
|
+/**
|
|
+ * Write Packet through DMA Tx channel to peripheral.
|
|
+ *
|
|
+ * \param *dma_dev - pointer to DMA device structure
|
|
+ * \param *dataptr - pointer to data to be sent
|
|
+ * \param len - amount of data bytes to be sent
|
|
+ * \param *opt
|
|
+ * \return len - length of transmitted data
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int dma_device_write(struct dma_device_info *dma_dev, u8 *dataptr, int len,
|
|
+ void *opt)
|
|
+{
|
|
+ unsigned long flag;
|
|
+ u32 byte_offset;
|
|
+ struct dma_channel_info *ch;
|
|
+ int chan_no;
|
|
+ struct tx_desc *tx_desc_p;
|
|
+ local_irq_save(flag);
|
|
+
|
|
+ ch = dma_dev->tx_chan[dma_dev->current_tx_chan];
|
|
+ chan_no = (int)(ch - dma_chan);
|
|
+
|
|
+ if (ch->control == LTQ_DMA_CH_OFF) {
|
|
+ local_irq_restore(flag);
|
|
+ printk(KERN_ERR "%s: dma channel %d not enabled!\n",
|
|
+ __func__, chan_no);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ tx_desc_p = (struct tx_desc *)ch->desc_base+ch->curr_desc;
|
|
+ /* Check whether this descriptor is available */
|
|
+ if (tx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) {
|
|
+		/* if not, tell the upper layer device */
|
|
+ dma_dev->intr_handler(dma_dev, TX_BUF_FULL_INT);
|
|
+ local_irq_restore(flag);
|
|
+ return 0;
|
|
+ }
|
|
+ ch->opt[ch->curr_desc] = opt;
|
|
+	/* byte offset: adjust the starting address of the data buffer;
+	 * it should be a multiple of the burst length. */
|
|
+ byte_offset = ((u32)CPHYSADDR((u32)dataptr)) %
|
|
+ (dma_dev->tx_burst_len * 4);
|
|
+#ifndef CONFIG_MIPS_UNCACHED
|
|
+ dma_cache_wback((unsigned long)dataptr, len);
|
|
+ wmb();
|
|
+#endif
|
|
+ tx_desc_p->data_pointer = (u32)CPHYSADDR((u32)dataptr) - byte_offset;
|
|
+ wmb();
|
|
+ tx_desc_p->status.word = (DMA_OWN << 31)
|
|
+ | DMA_DESC_SOP_SET
|
|
+ | DMA_DESC_EOP_SET
|
|
+ | (byte_offset << 23)
|
|
+ | len;
|
|
+ wmb();
|
|
+
|
|
+ if (ch->xfer_cnt == 0) {
|
|
+ mbs_grab();
|
|
+ dma_w32(chan_no, cs);
|
|
+ dma_w32_mask(0, DMA_CCTRL_ON_OFF, cctrl);
|
|
+ mbs_release();
|
|
+ }
|
|
+
|
|
+ ch->xfer_cnt++;
|
|
+ ch->curr_desc++;
|
|
+ if (ch->curr_desc == ch->desc_len)
|
|
+ ch->curr_desc = 0;
|
|
+
|
|
+ local_irq_restore(flag);
|
|
+ return len;
|
|
+}
|
|
+EXPORT_SYMBOL(dma_device_write);
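+/*
+ * Usage sketch (illustrative only; data, len and ctx are placeholders):
+ *
+ *	dma_dev->current_tx_chan = 0;
+ *	if (dma_device_write(dma_dev, data, len, ctx) != len)
+ *		handle_tx_failure();
+ *
+ * A short return value means the channel was off or the descriptor ring was
+ * full; in the latter case TX_BUF_FULL_INT is signalled to the intr_handler
+ * callback before 0 is returned.
+ */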
|
|
+
|
|
+/**
|
|
+ * Display descriptor list via proc file
|
|
+ *
|
|
+ * \param chan_no - logical channel number
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int desc_list_proc_read(char *buf, char **start, off_t offset,
|
|
+ int count, int *eof, void *data)
|
|
+{
|
|
+ int len = 0;
|
|
+ int i;
|
|
+ static int chan_no;
|
|
+ u32 *p;
|
|
+
|
|
+ if ((chan_no == 0) && (offset > count)) {
|
|
+ *eof = 1;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (chan_no != 0) {
|
|
+ *start = buf;
|
|
+ } else {
|
|
+ buf = buf + offset;
|
|
+ *start = buf;
|
|
+ }
|
|
+
|
|
+ p = (u32 *)dma_chan[chan_no].desc_base;
|
|
+
|
|
+ if (dma_chan[chan_no].dir == DIR_RX)
|
|
+ len += sprintf(buf + len,
|
|
+ "channel %d %s Rx descriptor list:\n",
|
|
+ chan_no, dma_chan[chan_no].dma_dev->device_name);
|
|
+ else
|
|
+ len += sprintf(buf + len,
|
|
+ "channel %d %s Tx descriptor list:\n",
|
|
+ chan_no, dma_chan[chan_no].dma_dev->device_name);
|
|
+ len += sprintf(buf + len,
|
|
+ " no address data pointer command bits "
|
|
+ "(Own, Complete, SoP, EoP, Offset) \n");
|
|
+ len += sprintf(buf + len,
|
|
+ "----------------------------------------------"
|
|
+ "-----------------------------------\n");
|
|
+ for (i = 0; i < dma_chan[chan_no].desc_len; i++) {
|
|
+ len += sprintf(buf + len, "%3d ", i);
|
|
+ len += sprintf(buf + len, "0x%08x ", (u32)(p + (i * 2)));
|
|
+ len += sprintf(buf + len, "%08x ", *(p + (i * 2 + 1)));
|
|
+ len += sprintf(buf + len, "%08x ", *(p + (i * 2)));
|
|
+
|
|
+ if (*(p + (i * 2)) & 0x80000000)
|
|
+ len += sprintf(buf + len, "D ");
|
|
+ else
|
|
+ len += sprintf(buf + len, "C ");
|
|
+ if (*(p + (i * 2)) & 0x40000000)
|
|
+ len += sprintf(buf + len, "C ");
|
|
+ else
|
|
+ len += sprintf(buf + len, "c ");
|
|
+ if (*(p + (i * 2)) & 0x20000000)
|
|
+ len += sprintf(buf + len, "S ");
|
|
+ else
|
|
+ len += sprintf(buf + len, "s ");
|
|
+ if (*(p + (i * 2)) & 0x10000000)
|
|
+ len += sprintf(buf + len, "E ");
|
|
+ else
|
|
+ len += sprintf(buf + len, "e ");
|
|
+
|
|
+ /* byte offset is different for rx and tx descriptors*/
|
|
+ if (dma_chan[chan_no].dir == DIR_RX) {
|
|
+ len += sprintf(buf + len, "%01x ",
|
|
+ (*(p + (i * 2)) & 0x01800000) >> 23);
|
|
+ } else {
|
|
+ len += sprintf(buf + len, "%02x ",
|
|
+ (*(p + (i * 2)) & 0x0F800000) >> 23);
|
|
+ }
|
|
+
|
|
+ if (dma_chan[chan_no].curr_desc == i)
|
|
+ len += sprintf(buf + len, "<- CURR");
|
|
+
|
|
+ if (dma_chan[chan_no].prev_desc == i)
|
|
+ len += sprintf(buf + len, "<- PREV");
|
|
+
|
|
+ len += sprintf(buf + len, "\n");
|
|
+
|
|
+ }
|
|
+
|
|
+ len += sprintf(buf + len, "\n");
|
|
+ chan_no++;
|
|
+ if (chan_no > LTQ_MAX_DMA_CHANNEL_NUM - 1)
|
|
+ chan_no = 0;
|
|
+
|
|
+ *eof = 1;
|
|
+ return len;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Displays the weight of all DMA channels via proc file
|
|
+ *
|
|
+ *
|
|
+ *
|
|
+ * \param *buf
|
|
+ * \param **start
|
|
+ * \param offset
|
|
+ * \param count
|
|
+ * \param *eof
|
|
+ * \param *data
|
|
+ * \return len - amount of bytes written to file
|
|
+ */
|
|
+int channel_weight_proc_read(char *buf, char **start, off_t offset,
|
|
+ int count, int *eof, void *data)
|
|
+{
|
|
+ int i;
|
|
+ int len = 0;
|
|
+ len += sprintf(buf + len, "Qos dma channel weight list\n");
|
|
+ len += sprintf(buf + len, "channel_num default_weight "
|
|
+ "current_weight device Tx/Rx\n");
|
|
+ len += sprintf(buf + len, "---------------------------"
|
|
+ "---------------------------------\n");
|
|
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
|
|
+ struct dma_channel_info *ch = &dma_chan[i];
|
|
+
|
|
+ if (ch->dir == DIR_RX) {
|
|
+ len += sprintf(buf + len,
|
|
+ " %2d %08x "
|
|
+ "%08x %10s Rx\n",
|
|
+ i, ch->default_weight, ch->weight,
|
|
+ ch->dma_dev->device_name);
|
|
+ } else {
|
|
+ len += sprintf(buf + len,
|
|
+ " %2d %08x "
|
|
+ "%08x %10s Tx\n",
|
|
+ i, ch->default_weight, ch->weight,
|
|
+ ch->dma_dev->device_name);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Provides DMA Register Content to proc file
|
|
+ * This function reads the content of general DMA Registers, DMA Channel
|
|
+ * Registers and DMA Port Registers and writes a structured output to the
|
|
+ * DMA proc file
|
|
+ *
|
|
+ * \param *buf
|
|
+ * \param **start
|
|
+ * \param offset
|
|
+ * \param count
|
|
+ * \param *eof
|
|
+ * \param *data
|
|
+ * \return len - amount of bytes written to file
|
|
+ */
|
|
+int dma_register_proc_read(char *buf, char **start, off_t offset,
|
|
+ int count, int *eof, void *data)
|
|
+{
|
|
+ int len = 0;
|
|
+ int i;
|
|
+ int limit = count;
|
|
+ unsigned long flags;
|
|
+ static int blockcount;
|
|
+ static int channel_no;
|
|
+
|
|
+ if ((blockcount == 0) && (offset > count)) {
|
|
+ *eof = 1;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ switch (blockcount) {
|
|
+ case 0:
|
|
+ len += sprintf(buf + len, "\nGeneral DMA Registers\n");
|
|
+ len += sprintf(buf + len, "-------------------------"
|
|
+ "----------------\n");
|
|
+ len += sprintf(buf + len, "CLC= %08x\n", dma_r32(clc));
|
|
+ len += sprintf(buf + len, "ID= %08x\n", dma_r32(id));
|
|
+ len += sprintf(buf + len, "DMA_CPOLL= %08x\n", dma_r32(cpoll));
|
|
+ len += sprintf(buf + len, "DMA_CS= %08x\n", dma_r32(cs));
|
|
+ len += sprintf(buf + len, "DMA_PS= %08x\n", dma_r32(ps));
|
|
+ len += sprintf(buf + len, "DMA_IRNEN= %08x\n", dma_r32(irnen));
|
|
+ len += sprintf(buf + len, "DMA_IRNCR= %08x\n", dma_r32(irncr));
|
|
+ len += sprintf(buf + len, "DMA_IRNICR= %08x\n",
|
|
+ dma_r32(irnicr));
|
|
+ len += sprintf(buf + len, "\nDMA Channel Registers\n");
|
|
+ blockcount = 1;
|
|
+ return len;
|
|
+ break;
|
|
+ case 1:
|
|
+ /* If we had an overflow start at beginning of buffer
|
|
+ * otherwise use offset */
|
|
+ if (channel_no != 0) {
|
|
+ *start = buf;
|
|
+ } else {
|
|
+ buf = buf + offset;
|
|
+ *start = buf;
|
|
+ }
|
|
+
|
|
+ local_irq_save(flags);
|
|
+ for (i = channel_no; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
|
|
+ struct dma_channel_info *ch = &dma_chan[i];
|
|
+
|
|
+ if (len + 300 > limit) {
|
|
+ local_irq_restore(flags);
|
|
+ channel_no = i;
|
|
+ blockcount = 1;
|
|
+ return len;
|
|
+ }
|
|
+ len += sprintf(buf + len, "----------------------"
|
|
+ "-------------------\n");
|
|
+ if (ch->dir == DIR_RX) {
|
|
+ len += sprintf(buf + len,
|
|
+ "Channel %d - Device %s Rx\n",
|
|
+ i, ch->dma_dev->device_name);
|
|
+ } else {
|
|
+ len += sprintf(buf + len,
|
|
+ "Channel %d - Device %s Tx\n",
|
|
+ i, ch->dma_dev->device_name);
|
|
+ }
|
|
+ dma_w32(i, cs);
|
|
+ len += sprintf(buf + len, "DMA_CCTRL= %08x\n",
|
|
+ dma_r32(cctrl));
|
|
+ len += sprintf(buf + len, "DMA_CDBA= %08x\n",
|
|
+ dma_r32(cdba));
|
|
+ len += sprintf(buf + len, "DMA_CIE= %08x\n",
|
|
+ dma_r32(cie));
|
|
+ len += sprintf(buf + len, "DMA_CIS= %08x\n",
|
|
+ dma_r32(cis));
|
|
+ len += sprintf(buf + len, "DMA_CDLEN= %08x\n",
|
|
+ dma_r32(cdlen));
|
|
+ }
|
|
+ local_irq_restore(flags);
|
|
+ blockcount = 2;
|
|
+ channel_no = 0;
|
|
+ return len;
|
|
+ break;
|
|
+ case 2:
|
|
+ *start = buf;
|
|
+ /*
|
|
+ * display port dependent registers
|
|
+ */
|
|
+ len += sprintf(buf + len, "\nDMA Port Registers\n");
|
|
+ len += sprintf(buf + len,
|
|
+ "-----------------------------------------\n");
|
|
+ local_irq_save(flags);
|
|
+ for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
|
|
+ dma_w32(i, ps);
|
|
+ len += sprintf(buf + len,
|
|
+ "Port %d DMA_PCTRL= %08x\n",
|
|
+ i, dma_r32(pctrl));
|
|
+ }
|
|
+ local_irq_restore(flags);
|
|
+ blockcount = 0;
|
|
+ *eof = 1;
|
|
+ return len;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ blockcount = 0;
|
|
+ *eof = 1;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Open Method of DMA Device Driver
|
|
+ * This function increments the device driver's use counter.
|
|
+ *
|
|
+ *
|
|
+ * \param
|
|
+ * \return
|
|
+ */
|
|
+static int dma_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Release Method of DMA Device driver.
|
|
+ * This function decrements the device driver's use counter.
|
|
+ *
|
|
+ *
|
|
+ * \param
|
|
+ * \return
|
|
+ */
|
|
+static int dma_release(struct inode *inode, struct file *file)
|
|
+{
|
|
+ /* release the resources */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Ioctl Interface to DMA Module
|
|
+ *
|
|
+ * \param None
|
|
+ * \return 0 - initialization successful
|
|
+ * <0 - failed initialization
|
|
+ */
|
|
+static long dma_ioctl(struct file *file,
|
|
+ unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ int result = 0;
|
|
+	/* TODO: add some user controlled functions here */
|
|
+ return result;
|
|
+}
|
|
+
|
|
+static const struct file_operations dma_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = dma_open,
|
|
+ .release = dma_release,
|
|
+ .unlocked_ioctl = dma_ioctl,
|
|
+};
|
|
+
|
|
+void map_dma_chan(struct dma_channel_info *map)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /* assign default values for channel settings */
|
|
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
|
|
+ dma_chan[i].byte_offset = 0;
|
|
+ dma_chan[i].open = &open_chan;
|
|
+ dma_chan[i].close = &close_chan;
|
|
+ dma_chan[i].reset = &reset_chan;
|
|
+ dma_chan[i].enable_irq = enable_ch_irq;
|
|
+ dma_chan[i].disable_irq = disable_ch_irq;
|
|
+ dma_chan[i].tx_weight = 1;
|
|
+ dma_chan[i].control = 0;
|
|
+ dma_chan[i].default_weight = LTQ_DMA_CH_DEFAULT_WEIGHT;
|
|
+ dma_chan[i].weight = dma_chan[i].default_weight;
|
|
+ dma_chan[i].curr_desc = 0;
|
|
+ dma_chan[i].prev_desc = 0;
|
|
+ }
|
|
+
|
|
+ /* assign default values for port settings */
|
|
+ for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
|
|
+ /*set default tx channel number to be one*/
|
|
+ dma_devices[i].num_tx_chan = 1;
|
|
+ /*set default rx channel number to be one*/
|
|
+ dma_devices[i].num_rx_chan = 1;
|
|
+ dma_devices[i].buffer_alloc = common_buffer_alloc;
|
|
+ dma_devices[i].buffer_free = common_buffer_free;
|
|
+ dma_devices[i].intr_handler = NULL;
|
|
+ dma_devices[i].tx_burst_len = 4;
|
|
+ dma_devices[i].rx_burst_len = 4;
|
|
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
|
+ dma_devices[i].tx_endianness_mode = 0;
|
|
+ dma_devices[i].rx_endianness_mode = 0;
|
|
+#else
|
|
+ dma_devices[i].tx_endianness_mode = 3;
|
|
+ dma_devices[i].rx_endianness_mode = 3;
|
|
+#endif
|
|
+ }
|
|
+}
|
|
+
|
|
+void dma_chip_init(void)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ sys1_w32(SYS1_CLKENR_DMA, clkenr);
|
|
+ wmb();
|
|
+ /* reset DMA */
|
|
+ dma_w32(DMA_CTRL_RST, ctrl);
|
|
+ wmb();
|
|
+ /* disable all the interrupts first */
|
|
+ dma_w32(0, irnen);
|
|
+
|
|
+ /* enable polling for all channels */
|
|
+ dma_w32(DMA_CPOLL_EN | DMA_CPOLL_CNT_VAL(DMA_POLL_COUNTER), cpoll);
|
|
+
|
|
+ /****************************************************/
|
|
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
|
|
+ disable_ch_irq(&dma_chan[i]);
|
|
+}
|
|
+
|
|
+int ltq_dma_init(void)
|
|
+{
|
|
+ int result = 0;
|
|
+ int i;
|
|
+ unsigned long flag;
|
|
+ static int dma_initialized;
|
|
+
|
|
+ if (dma_initialized == 1)
|
|
+ return 0;
|
|
+ dma_initialized = 1;
|
|
+
|
|
+ result = register_chrdev(DMA_MAJOR, "dma-core", &dma_fops);
|
|
+ if (result) {
|
|
+ DMA_EMSG("cannot register device dma-core!\n");
|
|
+ return result;
|
|
+ }
|
|
+
|
|
+ dma_chip_init();
|
|
+ map_dma_chan(dma_chan);
|
|
+
|
|
+ /* allocate DMA memory for buffer descriptors */
|
|
+ for (i = 0; i < DMA_DESCR_MEM_PAGES; i++) {
|
|
+ g_desc_list[i] = (u64 *)__get_free_page(GFP_DMA);
|
|
+ if (g_desc_list[i] == NULL) {
|
|
+ DMA_EMSG("no memory for desriptor\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ g_desc_list[i] = (u64 *)KSEG1ADDR(g_desc_list[i]);
|
|
+ memset(g_desc_list[i], 0, PAGE_SIZE);
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
|
|
+ int page_index, ch_per_page;
|
|
+ /* cross-link relative channels of a port to
|
|
+ * corresponding absolute channels */
|
|
+ if (dma_chan[i].dir == DIR_RX) {
|
|
+ ((struct dma_device_info *)(dma_chan[i].dma_dev))->
|
|
+ rx_chan[dma_chan[i].rel_chan_no] = &dma_chan[i];
|
|
+ } else {
|
|
+ ((struct dma_device_info *)(dma_chan[i].dma_dev))->
|
|
+ tx_chan[dma_chan[i].rel_chan_no] = &dma_chan[i];
|
|
+ }
|
|
+ dma_chan[i].abs_chan_no = i;
|
|
+
|
|
+ page_index = i * DMA_DESCR_CH_SIZE / PAGE_SIZE;
|
|
+ ch_per_page = PAGE_SIZE / DMA_DESCR_CH_SIZE +
|
|
+ ((PAGE_SIZE % DMA_DESCR_CH_SIZE) > 0);
|
|
+ dma_chan[i].desc_base =
|
|
+ (u32)g_desc_list[page_index] +
|
|
+ (i - page_index*ch_per_page) * DMA_DESCR_NUM*8;
|
|
+ dma_chan[i].curr_desc = 0;
|
|
+ dma_chan[i].desc_len = DMA_DESCR_NUM;
|
|
+
|
|
+ local_irq_save(flag);
|
|
+ mbs_grab();
|
|
+ dma_w32(i, cs);
|
|
+ dma_w32((u32)CPHYSADDR(dma_chan[i].desc_base), cdba);
|
|
+ mbs_release();
|
|
+ local_irq_restore(flag);
|
|
+ }
|
|
+
|
|
+ g_dma_dir = proc_mkdir("driver/" DRV_NAME, NULL);
|
|
+
|
|
+ create_proc_read_entry("dma_register",
|
|
+ 0,
|
|
+ g_dma_dir,
|
|
+ dma_register_proc_read,
|
|
+ NULL);
|
|
+
|
|
+ create_proc_read_entry("g_desc_list",
|
|
+ 0,
|
|
+ g_dma_dir,
|
|
+ desc_list_proc_read,
|
|
+ NULL);
|
|
+
|
|
+ create_proc_read_entry("channel_weight",
|
|
+ 0,
|
|
+ g_dma_dir,
|
|
+ channel_weight_proc_read,
|
|
+ NULL);
|
|
+
|
|
+ printk(KERN_NOTICE "SVIP DMA engine initialized\n");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * Cleanup DMA device
|
|
+ * This function releases all resources used by the DMA device driver on
|
|
+ * module removal.
|
|
+ *
|
|
+ *
|
|
+ * \param None
|
|
+ * \return Nothing
|
|
+ */
|
|
+void dma_cleanup(void)
|
|
+{
|
|
+ int i;
|
|
+ unregister_chrdev(DMA_MAJOR, "dma-core");
|
|
+
|
|
+ for (i = 0; i < DMA_DESCR_MEM_PAGES; i++)
|
|
+ free_page(KSEG0ADDR((unsigned long)g_desc_list[i]));
|
|
+ remove_proc_entry("channel_weight", g_dma_dir);
|
|
+ remove_proc_entry("g_desc_list", g_dma_dir);
|
|
+ remove_proc_entry("dma_register", g_dma_dir);
|
|
+ remove_proc_entry("driver/" DRV_NAME, NULL);
|
|
+ /* release the resources */
|
|
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
|
|
+ free_irq(dma_chan[i].irq, (void *)&dma_chan[i]);
|
|
+}
|
|
+
|
|
+arch_initcall(ltq_dma_init);
|
|
+
|
|
+MODULE_LICENSE("GPL");
|
|
Index: linux-3.3.8/drivers/net/ethernet/svip_eth.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/drivers/net/ethernet/svip_eth.c 2012-07-31 20:14:58.881165968 +0200
|
|
@@ -0,0 +1,636 @@
|
|
+/************************************************************************
|
|
+ *
|
|
+ * Copyright (c) 2005
|
|
+ * Infineon Technologies AG
|
|
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version
|
|
+ * 2 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ ************************************************************************/
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/ip.h>
|
|
+#include <linux/tcp.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/ethtool.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/delay.h>
|
|
+#include <asm/checksum.h>
|
|
+
|
|
+#if 1 /** TODO: MOVE TO APPROPRIATE PLACE */
|
|
+
|
|
+#define ETHERNET_PACKET_DMA_BUFFER_SIZE 0x600
|
|
+#define REV_MII_MODE 2
|
|
+
|
|
+#endif
|
|
+
|
|
+#define DRV_NAME "ifxmips_mii0"
|
|
+
|
|
+#include <lantiq_soc.h>
|
|
+#include <svip_dma.h>
|
|
+
|
|
+#ifdef CONFIG_DEBUG_MINI_BOOT
|
|
+#define IKOS_MINI_BOOT
|
|
+#endif
|
|
+
|
|
+/* debugging */
|
|
+#undef INCAIP2_SW_DUMP
|
|
+
|
|
+#define INCAIP2_SW_EMSG(fmt,args...) printk("%s: " fmt, __FUNCTION__ , ##args)
|
|
+
|
|
+#define INCAIP2_SW_CHIP_NO 1
|
|
+#define INCAIP2_SW_CHIP_ID 0
|
|
+#define INCAIP2_SW_DEVICE_NO 1
|
|
+
|
|
+#ifdef INCAIP2_SW_DEBUG_MSG
|
|
+#define INCAIP2_SW_DMSG(fmt,args...) printk("%s: " fmt, __FUNCTION__ , ##args)
|
|
+#else
|
|
+#define INCAIP2_SW_DMSG(fmt,args...)
|
|
+#endif
|
|
+
|
|
+/************************** Module Parameters *****************************/
|
|
+static char *mode = "bridge";
|
|
+module_param(mode, charp, 0000);
|
|
+MODULE_PARM_DESC(mode, "<description>");
|
|
+
|
|
+#ifdef HAVE_TX_TIMEOUT
|
|
+static int timeout = 10*HZ;
|
|
+module_param(timeout, int, 0);
|
|
+MODULE_PARM_DESC(timeout, "Transmission watchdog timeout in seconds");
|
|
+#endif
|
|
+
|
|
+#ifdef IKOS_MINI_BOOT
|
|
+#ifdef CONFIG_INCAIP2
|
|
+extern s32 incaip2_sw_to_mbx(struct sk_buff* skb);
|
|
+#endif
|
|
+extern s32 svip_sw_to_mbx(struct sk_buff* skb);
|
|
+#endif
|
|
+
|
|
+struct svip_mii_priv {
|
|
+ struct net_device_stats stats;
|
|
+ struct dma_device_info *dma_device;
|
|
+ struct sk_buff *skb;
|
|
+};
|
|
+
|
|
+static struct net_device *svip_mii0_dev;
|
|
+static unsigned char mac_addr[MAX_ADDR_LEN];
|
|
+static unsigned char my_ethaddr[MAX_ADDR_LEN];
|
|
+
|
|
+/**
|
|
+ * Initialize MAC address.
|
|
+ * This function copies the Ethernet address from the kernel command line.
|
|
+ *
|
|
+ * \param line Pointer to parameter
|
|
+ * \return 0 OK
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+static int __init svip_eth_ethaddr_setup(char *line)
|
|
+{
|
|
+ char *ep;
|
|
+ int i;
|
|
+
|
|
+ memset(my_ethaddr, 0, MAX_ADDR_LEN);
|
|
+ /* there should really be routines to do this stuff */
|
|
+ for (i = 0; i < 6; i++)
|
|
+ {
|
|
+ my_ethaddr[i] = line ? simple_strtoul(line, &ep, 16) : 0;
|
|
+ if (line)
|
|
+ line = (*ep) ? ep+1 : ep;
|
|
+ }
|
|
+ INCAIP2_SW_DMSG("mac address %2x-%2x-%2x-%2x-%2x-%2x \n"
|
|
+ ,my_ethaddr[0]
|
|
+ ,my_ethaddr[1]
|
|
+ ,my_ethaddr[2]
|
|
+ ,my_ethaddr[3]
|
|
+ ,my_ethaddr[4]
|
|
+ ,my_ethaddr[5]);
|
|
+ return 0;
|
|
+}
|
|
+__setup("ethaddr=", svip_eth_ethaddr_setup);
|
|
+
|
|
+
|
|
+/**
|
|
+ * Open RX DMA channels.
|
|
+ * This function opens all DMA rx channels.
|
|
+ *
|
|
+ * \param dma_dev pointer to DMA device information
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+static void svip_eth_open_rx_dma(struct dma_device_info *dma_dev)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for(i=0; i<dma_dev->num_rx_chan; i++)
|
|
+ {
|
|
+ dma_dev->rx_chan[i]->open(dma_dev->rx_chan[i]);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Open TX DMA channels.
|
|
+ * This function opens all DMA tx channels.
|
|
+ *
|
|
+ * \param dev pointer to net device structure that comprises
|
|
+ * DMA device information pointed to by it's priv field.
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+static void svip_eth_open_tx_dma(struct dma_device_info *dma_dev)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i=0; i<dma_dev->num_tx_chan; i++)
|
|
+ {
|
|
+ dma_dev->tx_chan[i]->open(dma_dev->tx_chan[i]);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+#ifdef CONFIG_NET_HW_FLOWCONTROL
|
|
+/**
|
|
+ * Enable receiving DMA.
|
|
+ * This function enables the receiving DMA channel.
|
|
+ *
|
|
+ * \param dev pointer to net device structure that comprises
|
|
+ * DMA device information pointed to by it's priv field.
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+void svip_eth_xon(struct net_device *dev)
|
|
+{
|
|
+ struct switch_priv *sw_dev = (struct switch_priv *)dev->priv;
|
|
+ struct dma_device_info* dma_dev =
|
|
+ (struct dma_device_info *)sw_dev->dma_device;
|
|
+ unsigned long flag;
|
|
+
|
|
+ local_irq_save(flag);
|
|
+
|
|
+ INCAIP2_SW_DMSG("wakeup\n");
|
|
+ svip_eth_open_rx_dma(dma_dev);
|
|
+
|
|
+ local_irq_restore(flag);
|
|
+}
|
|
+#endif /* CONFIG_NET_HW_FLOWCONTROL */
|
|
+
|
|
+
|
|
+/**
|
|
+ * Open network device.
|
|
+ * This function opens the network device and starts the interface queue.
|
|
+ *
|
|
+ * \param dev Device structure for Ethernet device
|
|
+ * \return 0 OK, device opened
|
|
+ * \return -1 Error, registering DMA device
|
|
+ * \ingroup API
|
|
+ */
|
|
+int svip_mii_open(struct net_device *dev)
|
|
+{
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+ struct dma_device_info *dma_dev = priv->dma_device;
|
|
+
|
|
+ svip_eth_open_rx_dma(dma_dev);
|
|
+ svip_eth_open_tx_dma(dma_dev);
|
|
+
|
|
+ netif_start_queue(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Close network device.
|
|
+ * This function closes the network device, which also stops the interface
|
|
+ * queue.
|
|
+ *
|
|
+ * \param dev Device structure for Ethernet device
|
|
+ * \return 0 OK, device closed (cannot fail)
|
|
+ * \ingroup API
|
|
+ */
|
|
+int svip_mii_release(struct net_device *dev)
|
|
+{
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+ struct dma_device_info *dma_dev = priv->dma_device;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < dma_dev->max_rx_chan_num; i++)
|
|
+ dma_dev->rx_chan[i]->close(dma_dev->rx_chan[i]);
|
|
+ netif_stop_queue(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Read data from DMA device.
|
|
+ * This function reads data from the DMA device. The function is called by
|
|
+ * the switch/DMA pseudo interrupt handler dma_intr_handler on occurrence of
|
|
+ * a DMA receive interrupt.
|
|
+ *
|
|
+ * \param dev Pointer to network device structure
|
|
+ * \param dma_dev Pointer to dma device structure
|
|
+ * \return OK In case of successful data reception from dma
|
|
+ * -EIO Incorrect opt pointer provided by device
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int svip_mii_hw_receive(struct net_device *dev, struct dma_device_info *dma_dev)
|
|
+{
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+ unsigned char *buf = NULL;
|
|
+ struct sk_buff *skb = NULL;
|
|
+ int len = 0;
|
|
+
|
|
+ len = dma_device_read(dma_dev, &buf, (void **)&skb);
|
|
+
|
|
+ if (len >= ETHERNET_PACKET_DMA_BUFFER_SIZE) {
|
|
+ printk(KERN_INFO DRV_NAME ": packet too large %d\n", len);
|
|
+ goto mii_hw_receive_err_exit;
|
|
+ }
|
|
+
|
|
+ if (skb == NULL) {
|
|
+ printk(KERN_INFO DRV_NAME ": cannot restore pointer\n");
|
|
+ goto mii_hw_receive_err_exit;
|
|
+ }
|
|
+
|
|
+ if (len > (skb->end - skb->tail)) {
|
|
+ printk(KERN_INFO DRV_NAME ": BUG, len:%d end:%p tail:%p\n",
|
|
+ len, skb->end, skb->tail);
|
|
+ goto mii_hw_receive_err_exit;
|
|
+ }
|
|
+
|
|
+ skb_put(skb, len);
|
|
+ skb->dev = dev;
|
|
+ skb->protocol = eth_type_trans(skb, dev);
|
|
+ netif_rx(skb);
|
|
+
|
|
+ priv->stats.rx_packets++;
|
|
+ priv->stats.rx_bytes += len;
|
|
+ return 0;
|
|
+
|
|
+mii_hw_receive_err_exit:
|
|
+ if (len == 0) {
|
|
+ if (skb)
|
|
+ dev_kfree_skb_any(skb);
|
|
+ priv->stats.rx_errors++;
|
|
+ priv->stats.rx_dropped++;
|
|
+ return -EIO;
|
|
+ } else {
|
|
+ return len;
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Write data to Ethernet switch.
|
|
+ * This function writes the data comprised in skb structure via DMA to the
|
|
+ * Ethernet Switch. It is installed as the switch driver's hard_start_xmit
|
|
+ * method.
|
|
+ *
|
|
+ * \param skb Pointer to socket buffer structure that contains the data
|
|
+ * to be sent
|
|
+ * \param dev Pointer to network device structure which is used for
|
|
+ * data transmission
|
|
+ * \return 1 Transmission error
|
|
+ * \return 0 OK, successful data transmission
|
|
+ * \ingroup API
|
|
+ */
|
|
+static int svip_mii_hw_tx(char *buf, int len, struct net_device *dev)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+ struct dma_device_info *dma_dev = priv->dma_device;
|
|
+ ret = dma_device_write(dma_dev, buf, len, priv->skb);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int svip_mii_tx(struct sk_buff *skb, struct net_device *dev)
|
|
+{
|
|
+ int len;
|
|
+ char *data;
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+ struct dma_device_info *dma_dev = priv->dma_device;
|
|
+
|
|
+ len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
|
|
+ data = skb->data;
|
|
+ priv->skb = skb;
|
|
+ dev->trans_start = jiffies;
|
|
+ /* TODO: we got more than 1 dma channel,
|
|
+ so we should do something intelligent here to select one */
|
|
+ dma_dev->current_tx_chan = 0;
|
|
+
|
|
+ wmb();
|
|
+
|
|
+ if (svip_mii_hw_tx(data, len, dev) != len) {
|
|
+ dev_kfree_skb_any(skb);
|
|
+ priv->stats.tx_errors++;
|
|
+ priv->stats.tx_dropped++;
|
|
+ } else {
|
|
+ priv->stats.tx_packets++;
|
|
+ priv->stats.tx_bytes += len;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Transmission timeout callback.
|
|
+ * This function is called when a transmission timeout occurs. It will wake up
|
|
+ * the interface queue again.
|
|
+ *
|
|
+ * \param dev Device structure for Ethernet device
|
|
+ * \ingroup API
|
|
+ */
|
|
+void svip_mii_tx_timeout(struct net_device *dev)
|
|
+{
|
|
+ int i;
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+
|
|
+ priv->stats.tx_errors++;
|
|
+ for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
|
|
+ priv->dma_device->tx_chan[i]->disable_irq(priv->dma_device->tx_chan[i]);
|
|
+ netif_wake_queue(dev);
|
|
+ return;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Get device statistics.
|
|
+ * This function returns the device statistics stored in the device structure.
|
|
+ *
|
|
+ * \param dev Device structure for Ethernet device
|
|
+ * \return stats Pointer to statistics structure
|
|
+ * \ingroup API
|
|
+ */
|
|
+static struct net_device_stats *svip_get_stats(struct net_device *dev)
|
|
+{
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+ return &priv->stats;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Pseudo Interrupt handler for DMA.
|
|
+ * This function processes DMA interrupts notified to the switch device driver.
|
|
+ * The function is installed at the DMA core as interrupt handler for the
|
|
+ * switch dma device.
|
|
+ * It handles the following DMA interrupts:
|
|
+ * In case of a DMA receive interrupt the received data is passed to the upper layer.
|
|
+ * In case of a transmit buffer full interrupt the transmit queue is stopped.
|
|
+ * In case of a transmission complete interrupt the transmit queue is restarted.
|
|
+ *
|
|
+ * \param dma_dev pointer to dma device structure
|
|
+ * \param status type of interrupt being notified (RCV_INT: dma receive
|
|
+ * interrupt, TX_BUF_FULL_INT: transmit buffer full interrupt,
|
|
+ * TRANSMIT_CPT_INT: transmission complete interrupt)
|
|
+ * \return OK In case of successful data reception from dma
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+int dma_intr_handler(struct dma_device_info *dma_dev, int status)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ switch (status) {
|
|
+ case RCV_INT:
|
|
+ svip_mii_hw_receive(svip_mii0_dev, dma_dev);
|
|
+ break;
|
|
+
|
|
+ case TX_BUF_FULL_INT:
|
|
+ printk(KERN_INFO DRV_NAME ": tx buffer full\n");
|
|
+ netif_stop_queue(svip_mii0_dev);
|
|
+ for (i = 0; i < dma_dev->max_tx_chan_num; i++) {
|
|
+ if ((dma_dev->tx_chan[i])->control == LTQ_DMA_CH_ON)
|
|
+ dma_dev->tx_chan[i]->enable_irq(dma_dev->tx_chan[i]);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case TRANSMIT_CPT_INT:
|
|
+
|
|
+#if 0
|
|
+ for (i = 0; i < dma_dev->max_tx_chan_num; i++)
|
|
+#if 0
|
|
+ dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i]);
|
|
+#else
|
|
+ dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i], (char *)__FUNCTION__);
|
|
+#endif
|
|
+ netif_wake_queue(svip_mii0_dev);
|
|
+#endif
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
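
The handler above is only reached through the function pointer installed in svip_mii_dev_init() further down. As a rough sketch of the assumed contract with the Lantiq DMA core (dma.c elsewhere in this patch, not shown in this hunk), the core is expected to invoke the callback along these lines; the exact call site is an assumption:

/* Assumed invocation by the DMA core on a channel event (sketch only). */
if (dma_dev->intr_handler)
	dma_dev->intr_handler(dma_dev, RCV_INT);    /* a frame was received */
/* ... and likewise with TX_BUF_FULL_INT / TRANSMIT_CPT_INT on TX events */
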
|
|
+
|
|
+
|
|
+/**
|
|
+ * Allocates buffer sufficient for Ethernet Frame.
|
|
+ * This function is installed as DMA callback function to be called on DMA
|
|
+ * receive interrupt.
|
|
+ *
|
|
+ * \param len Unused
|
|
+ * \param *byte_offset Pointer to byte offset
|
|
+ * \param **opt pointer to skb structure
|
|
+ * \return NULL In case buffer allocation fails
|
|
+ * buffer Pointer to allocated memory
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+unsigned char *svip_etop_dma_buffer_alloc(int len, int *byte_offset, void **opt)
|
|
+{
|
|
+ unsigned char *buffer = NULL;
|
|
+ struct sk_buff *skb = NULL;
|
|
+
|
|
+ skb = dev_alloc_skb(ETHERNET_PACKET_DMA_BUFFER_SIZE);
|
|
+ if (skb == NULL)
|
|
+ return NULL;
|
|
+
|
|
+ buffer = (unsigned char *)(skb->data);
|
|
+ skb_reserve(skb, 2);
|
|
+ *opt = (void *)skb;
|
|
+ *byte_offset = 2;
|
|
+
|
|
+ return buffer;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Free DMA buffer.
|
|
+ * This function frees a buffer, which can be either a data buffer or an
|
|
+ * skb structure.
|
|
+ *
|
|
+ * \param *dataptr Pointer to data buffer
|
|
+ * \param *opt Pointer to skb structure
|
|
+ * \return 0 OK
|
|
+ * \ingroup Internal
|
|
+ */
|
|
+void svip_etop_dma_buffer_free(unsigned char *dataptr, void *opt)
|
|
+{
|
|
+ struct sk_buff *skb = NULL;
|
|
+
|
|
+ if (opt == NULL) {
|
|
+ kfree(dataptr);
|
|
+ } else {
|
|
+ skb = (struct sk_buff *)opt;
|
|
+ dev_kfree_skb_any(skb);
|
|
+ }
|
|
+}
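
Taken together, the two callbacks above form the buffer contract the driver hands to the DMA core: the allocator returns the raw skb data pointer plus a 2-byte offset (to keep the IP header aligned) and passes the skb back through *opt, and the matching free either drops the skb or, if no skb was attached, kfree()s the raw buffer. A minimal sketch of how a caller is assumed to use the pair (the real call sites live in the DMA core, which is not part of this hunk):

int byte_offset;
void *opt;
unsigned char *buf;

/* Allocate a receive buffer; 'opt' carries the backing skb. */
buf = svip_etop_dma_buffer_alloc(ETHERNET_PACKET_DMA_BUFFER_SIZE,
				 &byte_offset, &opt);
if (buf) {
	/* DMA would deposit the received frame at buf + byte_offset ... */
	svip_etop_dma_buffer_free(buf, opt);	/* releases the skb again */
}
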
|
|
+
|
|
+static int svip_mii_dev_init(struct net_device *dev);
|
|
+
|
|
+static const struct net_device_ops svip_eth_netdev_ops = {
|
|
+ .ndo_init = svip_mii_dev_init,
|
|
+ .ndo_open = svip_mii_open,
|
|
+ .ndo_stop = svip_mii_release,
|
|
+ .ndo_start_xmit = svip_mii_tx,
|
|
+ .ndo_get_stats = svip_get_stats,
|
|
+ .ndo_tx_timeout = svip_mii_tx_timeout,
|
|
+};
|
|
+
|
|
+//#include <linux/device.h>
|
|
+
|
|
+/**
|
|
+ * Initialize switch driver.
|
|
+ * This function initializes the switch driver structures and registers the
|
|
+ * Ethernet device.
|
|
+ *
|
|
+ * \param dev Device structure for Ethernet device
|
|
+ * \return 0 OK
|
|
+ * \return ENOMEM No memory for structures available
|
|
+ * \return -1 Error during DMA init or Ethernet address configuration.
|
|
+ * \ingroup API
|
|
+ */
|
|
+static int svip_mii_dev_init(struct net_device *dev)
|
|
+{
|
|
+ int i;
|
|
+ struct svip_mii_priv *priv = netdev_priv(dev);
|
|
+
|
|
+
|
|
+ ether_setup(dev);
|
|
+ printk(KERN_INFO DRV_NAME ": %s is up\n", dev->name);
|
|
+ dev->watchdog_timeo = 10 * HZ;
|
|
+ memset(priv, 0, sizeof(*priv));
|
|
+ priv->dma_device = dma_device_reserve("SW");
|
|
+ if (!priv->dma_device) {
|
|
+ BUG();
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ priv->dma_device->buffer_alloc = svip_etop_dma_buffer_alloc;
|
|
+ priv->dma_device->buffer_free = svip_etop_dma_buffer_free;
|
|
+ priv->dma_device->intr_handler = dma_intr_handler;
|
|
+
|
|
+ for (i = 0; i < priv->dma_device->max_rx_chan_num; i++)
|
|
+ priv->dma_device->rx_chan[i]->packet_size =
|
|
+ ETHERNET_PACKET_DMA_BUFFER_SIZE;
|
|
+
|
|
+ for (i = 0; i < priv->dma_device->max_tx_chan_num; i++) {
|
|
+ priv->dma_device->tx_chan[i]->tx_weight=DEFAULT_SW_CHANNEL_WEIGHT;
|
|
+ priv->dma_device->tx_chan[i]->packet_size =
|
|
+ ETHERNET_PACKET_DMA_BUFFER_SIZE;
|
|
+ }
|
|
+
|
|
+ dma_device_register(priv->dma_device);
|
|
+
|
|
+ printk(KERN_INFO DRV_NAME ": using mac=");
|
|
+
|
|
+ for (i = 0; i < 6; i++) {
|
|
+ dev->dev_addr[i] = mac_addr[i];
|
|
+ printk("%02X%c", dev->dev_addr[i], (i == 5) ? ('\n') : (':'));
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void svip_mii_chip_init(int mode)
|
|
+{
|
|
+}
|
|
+
|
|
+static int svip_mii_probe(struct platform_device *dev)
|
|
+{
|
|
+ int result = 0;
|
|
+ unsigned char *mac = (unsigned char *)dev->dev.platform_data;
|
|
+ svip_mii0_dev = alloc_etherdev(sizeof(struct svip_mii_priv));
|
|
+ svip_mii0_dev->netdev_ops = &svip_eth_netdev_ops;
|
|
+ memcpy(mac_addr, mac, 6);
|
|
+ strcpy(svip_mii0_dev->name, "eth%d");
|
|
+ svip_mii_chip_init(REV_MII_MODE);
|
|
+ result = register_netdev(svip_mii0_dev);
|
|
+ if (result) {
|
|
+ printk(KERN_INFO DRV_NAME
|
|
+ ": error %i registering device \"%s\"\n",
|
|
+ result, svip_mii0_dev->name);
|
|
+ goto out;
|
|
+ }
|
|
+ printk(KERN_INFO DRV_NAME ": driver loaded!\n");
|
|
+
|
|
+out:
|
|
+ return result;
|
|
+}
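
The probe only consumes a six-byte MAC address passed as platform data. A hypothetical board-level registration illustrating that expectation (the MAC value is made up; the real registration is done by the mach-easy* board files elsewhere in this patch):

#include <linux/platform_device.h>

static u8 board_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

static struct platform_device board_mii_dev = {
	.name = DRV_NAME,	/* must match the driver name used above */
	.id   = 0,
	.dev  = {
		.platform_data = board_mac,	/* picked up in svip_mii_probe() */
	},
};

/* somewhere in board init: platform_device_register(&board_mii_dev); */
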
|
|
+
|
|
+static int svip_mii_remove(struct platform_device *dev)
|
|
+{
|
|
+ struct svip_mii_priv *priv = netdev_priv(svip_mii0_dev);
|
|
+
|
|
+ printk(KERN_INFO DRV_NAME ": cleanup\n");
|
|
+
|
|
+ dma_device_unregister(priv->dma_device);
|
|
+ dma_device_release(priv->dma_device);
|
|
+ kfree(priv->dma_device);
|
|
+ unregister_netdev(svip_mii0_dev);
|
|
+ free_netdev(svip_mii0_dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+static struct platform_driver svip_mii_driver = {
|
|
+ .probe = svip_mii_probe,
|
|
+ .remove = svip_mii_remove,
|
|
+ .driver = {
|
|
+ .name = DRV_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
+ },
|
|
+};
|
|
+
|
|
+
|
|
+/**
|
|
+ * Initialize switch driver as module.
|
|
+ * This function initializes the switch driver structures and registers the
|
|
+ * Ethernet device for module usage.
|
|
+ *
|
|
+ * \return 0 OK
|
|
+ * \return ENODEV An error occurred during initialization
|
|
+ * \ingroup API
|
|
+ */
|
|
+int __init svip_mii_init(void)
|
|
+{
|
|
+ int ret = platform_driver_register(&svip_mii_driver);
|
|
+ if (ret)
|
|
+ printk(KERN_INFO DRV_NAME
|
|
+ ": Error registering platfom driver!\n");
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * Remove driver module.
|
|
+ * This function removes the driver and unregisters all devices.
|
|
+ *
|
|
+ * \ingroup API
|
|
+ */
|
|
+static void __exit svip_mii_cleanup(void)
|
|
+{
|
|
+ platform_driver_unregister(&svip_mii_driver);
|
|
+}
|
|
+
|
|
+module_init(svip_mii_init);
|
|
+module_exit(svip_mii_cleanup);
|
|
+
|
|
+MODULE_LICENSE("GPL");
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/mux.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/mux.c 2012-07-31 19:51:34.149105918 +0200
|
|
@@ -0,0 +1,187 @@
|
|
+/************************************************************************
|
|
+ *
|
|
+ * Copyright (c) 2007
|
|
+ * Infineon Technologies AG
|
|
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version
|
|
+ * 2 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ ************************************************************************/
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <linux/init.h>
|
|
+#include <asm/addrspace.h>
|
|
+#include <linux/platform_device.h>
|
|
+
|
|
+#include <lantiq_soc.h>
|
|
+#include <svip_mux.h>
|
|
+#include <sys1_reg.h>
|
|
+#include <sys2_reg.h>
|
|
+#include <svip_pms.h>
|
|
+
|
|
+#define DRV_NAME "ltq_mux"
|
|
+
|
|
+static void ltq_mux_port_init(const int port,
|
|
+ const struct ltq_mux_pin *pins,
|
|
+ const int pin_max)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < pin_max; i++)
|
|
+ ltq_gpio_configure(port,
|
|
+ i,
|
|
+ pins[i].dirin,
|
|
+ pins[i].puen,
|
|
+ pins[i].altsel0,
|
|
+ pins[i].altsel1);
|
|
+}
|
|
+
|
|
+static int ltq_mux_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct ltq_mux_settings *mux_settings = dev_get_platdata(&pdev->dev);
|
|
+
|
|
+ if (mux_settings->mux_p0)
|
|
+ ltq_mux_port_init(0,
|
|
+ mux_settings->mux_p0,
|
|
+ LTQ_MUX_P0_PINS);
|
|
+
|
|
+ if (mux_settings->mux_p1)
|
|
+ ltq_mux_port_init(1,
|
|
+ mux_settings->mux_p1,
|
|
+ LTQ_MUX_P1_PINS);
|
|
+
|
|
+ if (mux_settings->mux_p2)
|
|
+ ltq_mux_port_init(2,
|
|
+ mux_settings->mux_p2,
|
|
+ LTQ_MUX_P2_PINS);
|
|
+
|
|
+ if (mux_settings->mux_p3)
|
|
+ ltq_mux_port_init(3,
|
|
+ mux_settings->mux_p3,
|
|
+ LTQ_MUX_P3_PINS);
|
|
+
|
|
+ if (mux_settings->mux_p4)
|
|
+ ltq_mux_port_init(4,
|
|
+ mux_settings->mux_p4,
|
|
+ LTQ_MUX_P4_PINS);
|
|
+
|
|
+ return 0;
|
|
+}
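
The probe walks up to five per-port pin tables supplied as platform data. A sketch of what a board might pass in (struct ltq_mux_pin and struct ltq_mux_settings come from <svip_mux.h>, which is not shown in this hunk, so the field values below are purely illustrative):

/* One entry per pin of port 0; values are made up for illustration. */
static struct ltq_mux_pin board_p0_pins[LTQ_MUX_P0_PINS] = {
	[0] = { .dirin = 0, .puen = 1, .altsel0 = 0, .altsel1 = 0 },
	[1] = { .dirin = 1, .puen = 0, .altsel0 = 1, .altsel1 = 0 },
	/* ... */
};

static struct ltq_mux_settings board_mux = {
	.mux_p0 = board_p0_pins,
	/* ports left NULL are simply skipped by ltq_mux_probe() */
};
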
|
|
+
|
|
+int ltq_mux_read_procmem(char *buf, char **start, off_t offset,
|
|
+ int count, int *eof, void *data)
|
|
+{
|
|
+ int len = 0;
|
|
+ int t = 0, i = 0;
|
|
+ u32 port_clk[5] = {
|
|
+ SYS1_CLKENR_PORT0,
|
|
+ SYS1_CLKENR_PORT1,
|
|
+ SYS1_CLKENR_PORT2,
|
|
+ SYS1_CLKENR_PORT3,
|
|
+ SYS2_CLKENR_PORT4,
|
|
+ };
|
|
+
|
|
+#define PROC_PRINT(fmt, args...) \
|
|
+ do { \
|
|
+ int c_len = 0; \
|
|
+ c_len = snprintf(buf + len, count - len, fmt, ## args); \
|
|
+ if (c_len <= 0) \
|
|
+ goto out; \
|
|
+ if (c_len >= (count - len)) { \
|
|
+ len += (count - len); \
|
|
+ goto out; \
|
|
+ } \
|
|
+ len += c_len; \
|
|
+ if (offset > 0) { \
|
|
+ if (len > offset) { \
|
|
+ len -= offset; \
|
|
+ memmove(buf, buf + offset, len); \
|
|
+ offset = 0; \
|
|
+ } else { \
|
|
+ offset -= len; \
|
|
+ len = 0; \
|
|
+ } \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
+ PROC_PRINT("\nVINETIC-SVIP Multiplex Settings\n");
|
|
+ PROC_PRINT(" 3 2 1 0\n");
|
|
+ PROC_PRINT(" 10987654321098765432109876543210\n");
|
|
+ PROC_PRINT(" --------------------------------\n");
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(port_clk); i++) {
|
|
+ if (i < 4) {
|
|
+ if (!svip_sys1_clk_is_enabled(port_clk[i]))
|
|
+ continue;
|
|
+ } else {
|
|
+ if (!svip_sys2_clk_is_enabled(port_clk[i]))
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ PROC_PRINT("P%d.%-10s", i, "DIR:");
|
|
+
|
|
+ for (t = 31; t != -1; t--)
|
|
+ PROC_PRINT("%d", ltq_port_get_dir(i, t) == 1 ? 1 : 0);
|
|
+ PROC_PRINT("\n");
|
|
+
|
|
+ PROC_PRINT("P%d.%-10s", i, "PUEN:");
|
|
+ for (t = 31; t != -1; t--)
|
|
+ PROC_PRINT("%d", ltq_port_get_puden(i, t) == 1 ? 1 : 0);
|
|
+ PROC_PRINT("\n");
|
|
+
|
|
+ PROC_PRINT("P%d.%-10s", i, "ALTSEL0:");
|
|
+ for (t = 31; t != -1; t--)
|
|
+ PROC_PRINT("%d",
|
|
+ ltq_port_get_altsel0(i, t) == 1 ? 1 : 0);
|
|
+ PROC_PRINT("\n");
|
|
+
|
|
+ PROC_PRINT("P%d.%-10s", i, "ALTSEL1:");
|
|
+ for (t = 31; t != -1; t--)
|
|
+ PROC_PRINT("%d",
|
|
+ ltq_port_get_altsel1(i, t) == 1 ? 1 : 0);
|
|
+ PROC_PRINT("\n\n");
|
|
+ }
|
|
+
|
|
+out:
|
|
+ if (len < 0) {
|
|
+ len = 0;
|
|
+ *eof = 1;
|
|
+ } else if (len < count) {
|
|
+ *eof = 1;
|
|
+ } else {
|
|
+ len = count;
|
|
+ }
|
|
+
|
|
+ *start = buf;
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+static struct platform_driver ltq_mux_driver = {
|
|
+ .probe = ltq_mux_probe,
|
|
+ .driver = {
|
|
+ .name = DRV_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
+ },
|
|
+};
|
|
+
|
|
+int __init ltq_mux_init(void)
|
|
+{
|
|
+ int ret = platform_driver_register(<q_mux_driver);
|
|
+ if (ret) {
|
|
+ printk(KERN_INFO DRV_NAME
|
|
+ ": Error registering platform driver!");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return create_proc_read_entry("driver/ltq_mux", 0, NULL,
|
|
+ ltq_mux_read_procmem, NULL) == NULL;
|
|
+}
|
|
+
|
|
+module_init(ltq_mux_init);
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/pms.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/pms.c 2012-07-31 19:51:34.149105918 +0200
|
|
@@ -0,0 +1,101 @@
|
|
+/************************************************************************
|
|
+ *
|
|
+ * Copyright (c) 2007
|
|
+ * Infineon Technologies AG
|
|
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version
|
|
+ * 2 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ ************************************************************************/
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <linux/init.h>
|
|
+#include <asm/addrspace.h>
|
|
+
|
|
+#include <base_reg.h>
|
|
+#include <sys1_reg.h>
|
|
+#include <sys2_reg.h>
|
|
+#include <lantiq_soc.h>
|
|
+
|
|
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
|
|
+static struct svip_reg_sys2 *const sys2 = (struct svip_reg_sys2 *)LTQ_SYS2_BASE;
|
|
+
|
|
+void svip_sys1_clk_enable(u32 mask)
|
|
+{
|
|
+ sys1_w32(sys1_r32(clksr) | mask, clkenr);
|
|
+ asm("sync;");
|
|
+}
|
|
+EXPORT_SYMBOL(svip_sys1_clk_enable);
|
|
+
|
|
+int svip_sys1_clk_is_enabled(u32 mask)
|
|
+{
|
|
+ return (sys1_r32(clksr) & mask) != 0;
|
|
+}
|
|
+EXPORT_SYMBOL(svip_sys1_clk_is_enabled);
|
|
+
|
|
+void svip_sys2_clk_enable(u32 mask)
|
|
+{
|
|
+ sys2_w32(sys2_r32(clksr) | mask, clkenr);
|
|
+ asm("sync;");
|
|
+}
|
|
+EXPORT_SYMBOL(svip_sys2_clk_enable);
|
|
+
|
|
+int svip_sys2_clk_is_enabled(u32 mask)
|
|
+{
|
|
+ return (sys2_r32(clksr) & mask) != 0;
|
|
+}
|
|
+EXPORT_SYMBOL(svip_sys2_clk_is_enabled);
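
These helpers are the patch's central clock-gating API: SYS1 covers most peripherals, SYS2 the remainder (for example port 4 in mux.c above). A minimal usage sketch for a peripheral driver; SYS1_CLKENR_PORT0 is just an example mask taken from sys1_reg.h:

#include <sys1_reg.h>
#include <svip_pms.h>

static void example_hw_init(void)
{
	/* ungate the block's clock before touching its registers */
	if (!svip_sys1_clk_is_enabled(SYS1_CLKENR_PORT0))
		svip_sys1_clk_enable(SYS1_CLKENR_PORT0);
}
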
|
|
+
|
|
+int ltq_pms_read_procmem(char *buf, char **start, off_t offset,
|
|
+ int count, int *eof, void *data)
|
|
+{
|
|
+ long len = 0;
|
|
+ int t = 0;
|
|
+ u32 bit = 0;
|
|
+ u32 reg_tmp, bits_tmp;
|
|
+
|
|
+ len = sprintf(buf, "\nSVIP PMS Settings\n");
|
|
+ len = len + sprintf(buf + len,
|
|
+ " 3 2 1 0\n");
|
|
+ len = len + sprintf(buf + len,
|
|
+ " 210987654321098765432109876543210\n");
|
|
+ len = len + sprintf(buf + len,
|
|
+ "---------------------------------------------\n");
|
|
+ len = len + sprintf(buf + len,
|
|
+ "SYS1_CLKSR: ");
|
|
+ reg_tmp = sys1_r32(clksr);
|
|
+ bit = 0x80000000;
|
|
+ for (t = 31; t != -1; t--) {
|
|
+ bits_tmp = (reg_tmp & bit) >> t;
|
|
+ len = len + sprintf(buf + len, "%d", bits_tmp);
|
|
+ bit = bit >> 1;
|
|
+ }
|
|
+ len = len + sprintf(buf + len, "\n\n");
|
|
+ len = len + sprintf(buf + len, "SYS2_CLKSR: ");
|
|
+ reg_tmp = sys2_r32(clksr);
|
|
+ bit = 0x80000000;
|
|
+ for (t = 31; t != -1; t--) {
|
|
+ bits_tmp = (reg_tmp & bit) >> t;
|
|
+ len = len + sprintf(buf + len, "%d", bits_tmp);
|
|
+ bit = bit >> 1;
|
|
+ }
|
|
+ len = len + sprintf(buf + len, "\n\n");
|
|
+
|
|
+ *eof = 1;
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+int __init ltq_pms_init_proc(void)
|
|
+{
|
|
+ return create_proc_read_entry("driver/ltq_pms", 0, NULL,
|
|
+ ltq_pms_read_procmem, NULL) == NULL;
|
|
+}
|
|
+
|
|
+module_init(ltq_pms_init_proc);
|
|
Index: linux-3.3.8/drivers/spi/spi_svip.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/drivers/spi/spi_svip.c 2012-07-31 19:51:34.149105918 +0200
|
|
@@ -0,0 +1,955 @@
|
|
+/************************************************************************
|
|
+ *
|
|
+ * Copyright (c) 2008
|
|
+ * Infineon Technologies AG
|
|
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
|
|
+ *
|
|
+ * Inspired by Atmel AT32/AT91 SPI Controller driver
|
|
+ * Copyright (c) 2006 Atmel Corporation
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public License
|
|
+ * as published by the Free Software Foundation; either version
|
|
+ * 2 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ ************************************************************************/
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/spi/spi.h>
|
|
+
|
|
+#include <asm/io.h>
|
|
+
|
|
+#include <status_reg.h>
|
|
+#include <base_reg.h>
|
|
+#include <ssc_reg.h>
|
|
+#include <sys0_reg.h>
|
|
+#include <sys1_reg.h>
|
|
+
|
|
+#define SFRAME_SIZE 512 /* bytes */
|
|
+#define FIFO_HEADROOM 2 /* words */
|
|
+
|
|
+#define SVIP_SSC_RFIFO_WORDS 8
|
|
+
|
|
+enum svip_ssc_dir {
|
|
+ SSC_RXTX,
|
|
+ SSC_RX,
|
|
+ SSC_TX,
|
|
+ SSC_UNDEF
|
|
+};
|
|
+
|
|
+/*
|
|
+ * The core SPI transfer engine just talks to a register bank to set up
|
|
+ * DMA transfers; transfer queue progress is driven by IRQs. The clock
|
|
+ * framework provides the base clock, subdivided for each spi_device.
|
|
+ */
|
|
+struct svip_ssc_device {
|
|
+ struct svip_reg_ssc *regs;
|
|
+ enum svip_ssc_dir bus_dir;
|
|
+ struct spi_device *stay;
|
|
+
|
|
+ u8 stopping;
|
|
+ struct list_head queue;
|
|
+ struct spi_transfer *current_transfer;
|
|
+ int remaining_bytes;
|
|
+ int rx_bytes;
|
|
+ int tx_bytes;
|
|
+
|
|
+ char intname[4][16];
|
|
+
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+static int svip_ssc_setup(struct spi_device *spi);
|
|
+
|
|
+extern unsigned int ltq_get_fbs0_hz(void);
|
|
+
|
|
+static void cs_activate(struct svip_ssc_device *ssc_dev, struct spi_device *spi)
|
|
+{
|
|
+ ssc_dev->regs->whbgpostat = 0x0001 << spi->chip_select; /* activate the chip select */
|
|
+}
|
|
+
|
|
+static void cs_deactivate(struct svip_ssc_device *ssc_dev, struct spi_device *spi)
|
|
+{
|
|
+ ssc_dev->regs->whbgpostat = 0x0100 << spi->chip_select; /* deactivate the chip select */
|
|
+}
|
|
+
|
|
+/*
|
|
+ * "Normally" returns Byte Valid = 4.
|
|
+ * If the unaligned remainder of the packet is 3 bytes, these have to be
|
|
+ * transferred as a combination of a 16-bit and an 8-bit FPI transfer. For
|
|
+ * 2 or 1 remaining bytes a single 16-bit or 8-bit transfer will do.
|
|
+ */
|
|
+static inline int _estimate_bv(int byte_pos, int bytelen)
|
|
+{
|
|
+ int remainder = bytelen % 4;
|
|
+
|
|
+ if (byte_pos < (bytelen - remainder))
|
|
+ return 4;
|
|
+
|
|
+ if (remainder == 3)
|
|
+ {
|
|
+ if (byte_pos == (bytelen - remainder))
|
|
+ return 2;
|
|
+ else
|
|
+ return 1;
|
|
+ }
|
|
+ return remainder;
|
|
+}
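
The remainder rule above is easiest to check with concrete numbers; a worked example (not part of the patch):

/*
 * bytelen = 7  ->  remainder = 3, so the tail is split 2 + 1:
 *   _estimate_bv(0, 7) == 4   (bytes 0..3, one 32-bit FPI access)
 *   _estimate_bv(4, 7) == 2   (bytes 4..5, one 16-bit access)
 *   _estimate_bv(6, 7) == 1   (byte 6,     one 8-bit access)
 *
 * bytelen = 6  ->  remainder = 2, tail is a single 16-bit access;
 * bytelen = 5  ->  remainder = 1, tail is a single 8-bit access.
 */
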
|
|
+
|
|
+/*
|
|
+ * Submit next transfer.
|
|
+ * lock is held, spi irq is blocked
|
|
+ */
|
|
+static void svip_ssc_next_xfer(struct spi_master *master,
|
|
+ struct spi_message *msg)
|
|
+{
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ struct spi_transfer *xfer;
|
|
+ unsigned char *buf_ptr;
|
|
+
|
|
+ xfer = ssc_dev->current_transfer;
|
|
+ if (!xfer || ssc_dev->remaining_bytes == 0) {
|
|
+ if (xfer)
|
|
+ xfer = list_entry(xfer->transfer_list.next,
|
|
+ struct spi_transfer, transfer_list);
|
|
+ else
|
|
+ xfer = list_entry(msg->transfers.next,
|
|
+ struct spi_transfer, transfer_list);
|
|
+ ssc_dev->remaining_bytes = xfer->len;
|
|
+ ssc_dev->rx_bytes = 0;
|
|
+ ssc_dev->tx_bytes = 0;
|
|
+ ssc_dev->current_transfer = xfer;
|
|
+ ssc_dev->regs->sfcon = 0; /* reset Serial Framing */
|
|
+
|
|
+ /* enable and flush RX/TX FIFO */
|
|
+ ssc_dev->regs->rxfcon =
|
|
+ SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS-FIFO_HEADROOM) |
|
|
+ SSC_RXFCON_RXFLU | /* Receive FIFO Flush */
|
|
+ SSC_RXFCON_RXFEN; /* Receive FIFO Enable */
|
|
+
|
|
+ ssc_dev->regs->txfcon =
|
|
+ SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM) |
|
|
+ SSC_TXFCON_TXFLU | /* Transmit FIFO Flush */
|
|
+ SSC_TXFCON_TXFEN; /* Transmit FIFO Enable */
|
|
+
|
|
+ asm("sync");
|
|
+
|
|
+ /* select mode RXTX, RX or TX */
|
|
+ if (xfer->rx_buf && xfer->tx_buf) /* RX and TX */
|
|
+ {
|
|
+ if (ssc_dev->bus_dir != SSC_RXTX)
|
|
+ {
|
|
+ ssc_dev->regs->mcon &= ~(SSC_MCON_RXOFF | SSC_MCON_TXOFF);
|
|
+ ssc_dev->bus_dir = SSC_RXTX;
|
|
+ ssc_dev->regs->irnen = SSC_IRNEN_T | SSC_IRNEN_F | SSC_IRNEN_E;
|
|
+ }
|
|
+ ssc_dev->regs->sfcon =
|
|
+ SSC_SFCON_PLEN_VAL(0) |
|
|
+ SSC_SFCON_DLEN_VAL(((xfer->len-1)%SFRAME_SIZE)*8+7) |
|
|
+ SSC_SFCON_STOP |
|
|
+ SSC_SFCON_ICLK_VAL(2) |
|
|
+ SSC_SFCON_IDAT_VAL(2) |
|
|
+ SSC_SFCON_IAEN |
|
|
+ SSC_SFCON_SFEN;
|
|
+
|
|
+ }
|
|
+ else if (xfer->rx_buf) /* RX only */
|
|
+ {
|
|
+ if (ssc_dev->bus_dir != SSC_RX)
|
|
+ {
|
|
+ ssc_dev->regs->mcon =
|
|
+ (ssc_dev->regs->mcon | SSC_MCON_TXOFF) & ~SSC_MCON_RXOFF;
|
|
+
|
|
+ ssc_dev->bus_dir = SSC_RX;
|
|
+
|
|
+ ssc_dev->regs->irnen = SSC_IRNEN_R | SSC_IRNEN_E;
|
|
+ }
|
|
+ /* Initiate clock generation for Rx-Only Transfer. In case of RX-only transfer,
|
|
+ * rx_bytes represents the number of already requested bytes.
|
|
+ */
|
|
+ ssc_dev->rx_bytes = min(xfer->len, (unsigned)(SVIP_SSC_RFIFO_WORDS*4));
|
|
+ ssc_dev->regs->rxreq = ssc_dev->rx_bytes;
|
|
+ }
|
|
+ else /* TX only */
|
|
+ {
|
|
+ if (ssc_dev->bus_dir != SSC_TX)
|
|
+ {
|
|
+ ssc_dev->regs->mcon =
|
|
+ (ssc_dev->regs->mcon | SSC_MCON_RXOFF) & ~SSC_MCON_TXOFF;
|
|
+
|
|
+ ssc_dev->bus_dir = SSC_TX;
|
|
+
|
|
+ ssc_dev->regs->irnen =
|
|
+ SSC_IRNEN_T | SSC_IRNEN_F | SSC_IRNEN_E;
|
|
+ }
|
|
+ ssc_dev->regs->sfcon =
|
|
+ SSC_SFCON_PLEN_VAL(0) |
|
|
+ SSC_SFCON_DLEN_VAL(((xfer->len-1)%SFRAME_SIZE)*8+7) |
|
|
+ SSC_SFCON_STOP |
|
|
+ SSC_SFCON_ICLK_VAL(2) |
|
|
+ SSC_SFCON_IDAT_VAL(2) |
|
|
+ SSC_SFCON_IAEN |
|
|
+ SSC_SFCON_SFEN;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (xfer->tx_buf)
|
|
+ {
|
|
+ int outstanding;
|
|
+ int i;
|
|
+ int fstat = ssc_dev->regs->fstat;
|
|
+ int txffl = SSC_FSTAT_TXFFL_GET(fstat);
|
|
+ int rxffl = SSC_FSTAT_RXFFL_GET(fstat);
|
|
+
|
|
+ outstanding = txffl;
|
|
+
|
|
+ if (xfer->rx_buf)
|
|
+ {
|
|
+ outstanding += rxffl;
|
|
+ if (SSC_STATE_BSY_GET(ssc_dev->regs->state))
|
|
+ outstanding++;
|
|
+
|
|
+ while (rxffl) /* is 0 in TX-Only mode */
|
|
+ {
|
|
+ unsigned int rb;
|
|
+ int rxbv = _estimate_bv(ssc_dev->rx_bytes, xfer->len);
|
|
+ rb = ssc_dev->regs->rb;
|
|
+ for (i=0; i<rxbv; i++)
|
|
+ {
|
|
+ ((unsigned char*)xfer->rx_buf)[ssc_dev->rx_bytes] =
|
|
+ (rb >> ((rxbv-i-1)*8)) & 0xFF;
|
|
+
|
|
+ ssc_dev->rx_bytes++;
|
|
+ }
|
|
+ rxffl--;
|
|
+ outstanding--;
|
|
+ }
|
|
+ ssc_dev->remaining_bytes = xfer->len - ssc_dev->rx_bytes;
|
|
+ }
|
|
+
|
|
+ /* for last Tx cycle set TxFifo threshold to 0 */
|
|
+ if ((xfer->len - ssc_dev->tx_bytes) <=
|
|
+ (4*(SVIP_SSC_RFIFO_WORDS-1-outstanding)))
|
|
+ {
|
|
+ ssc_dev->regs->txfcon = SSC_TXFCON_TXFITL_VAL(0) |
|
|
+ SSC_TXFCON_TXFEN;
|
|
+ }
|
|
+
|
|
+ while ((ssc_dev->tx_bytes < xfer->len) &&
|
|
+ (outstanding < (SVIP_SSC_RFIFO_WORDS-1)))
|
|
+ {
|
|
+ unsigned int tb = 0;
|
|
+ int txbv = _estimate_bv(ssc_dev->tx_bytes, xfer->len);
|
|
+
|
|
+ for (i=0; i<txbv; i++)
|
|
+ {
|
|
+ tb |= ((unsigned char*)xfer->tx_buf)[ssc_dev->tx_bytes] <<
|
|
+ ((txbv-i-1)*8);
|
|
+
|
|
+ ssc_dev->tx_bytes++;
|
|
+ }
|
|
+ switch(txbv)
|
|
+ {
|
|
+#ifdef __BIG_ENDIAN
|
|
+ case 1:
|
|
+ *((unsigned char *)(&(ssc_dev->regs->tb))+3) = tb & 0xFF;
|
|
+ break;
|
|
+ case 2:
|
|
+ *((unsigned short *)(&(ssc_dev->regs->tb))+1) = tb & 0xFFFF;
|
|
+ break;
|
|
+#else /* __LITTLE_ENDIAN */
|
|
+ case 1:
|
|
+ *((unsigned char *)(&(ssc_dev->regs->tb))) = tb & 0xFF;
|
|
+ break;
|
|
+ case 2:
|
|
+ *((unsigned short *)(&(ssc_dev->regs->tb))) = tb & 0xFFFF;
|
|
+ break;
|
|
+#endif
|
|
+ default:
|
|
+ ssc_dev->regs->tb = tb;
|
|
+ }
|
|
+ outstanding++;
|
|
+ }
|
|
+ }
|
|
+ else /* xfer->tx_buf == NULL -> RX only! */
|
|
+ {
|
|
+ int j;
|
|
+ int rxffl = SSC_FSTAT_RXFFL_GET(ssc_dev->regs->fstat);
|
|
+ int rxbv = 0;
|
|
+ unsigned int rbuf;
|
|
+
|
|
+ buf_ptr = (unsigned char*)xfer->rx_buf +
|
|
+ (xfer->len - ssc_dev->remaining_bytes);
|
|
+
|
|
+ for (j = 0; j < rxffl; j++)
|
|
+ {
|
|
+ rxbv = SSC_STATE_RXBV_GET(ssc_dev->regs->state);
|
|
+ rbuf = ssc_dev->regs->rb;
|
|
+
|
|
+ if (rxbv == 4)
|
|
+ {
|
|
+ *((unsigned int*)buf_ptr+j) = ntohl(rbuf);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ int b;
|
|
+#ifdef __BIG_ENDIAN
|
|
+ for (b = 0; b < rxbv; b++)
|
|
+ {
|
|
+ buf_ptr[4*j+b] = ((unsigned char*)(&rbuf))[4-rxbv+b];
|
|
+ }
|
|
+#else /* __LITTLE_ENDIAN */
|
|
+ for (b = 0; b < rxbv; b++)
|
|
+ {
|
|
+ buf_ptr[4*j+b] = ((unsigned char*)(&rbuf))[rxbv-1-b];
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+ ssc_dev->remaining_bytes -= rxbv;
|
|
+ }
|
|
+ if ((ssc_dev->rx_bytes < xfer->len) &&
|
|
+ !SSC_STATE_BSY_GET(ssc_dev->regs->state))
|
|
+ {
|
|
+ int rxreq = min(xfer->len - ssc_dev->rx_bytes,
|
|
+ (unsigned)(SVIP_SSC_RFIFO_WORDS*4));
|
|
+
|
|
+ ssc_dev->rx_bytes += rxreq;
|
|
+ ssc_dev->regs->rxreq = rxreq;
|
|
+ }
|
|
+
|
|
+ if (ssc_dev->remaining_bytes < 0)
|
|
+ {
|
|
+ printk("ssc_dev->remaining_bytes = %d! xfer->len = %d, "
|
|
+ "rxffl=%d, rxbv=%d\n", ssc_dev->remaining_bytes, xfer->len,
|
|
+ rxffl, rxbv);
|
|
+
|
|
+ ssc_dev->remaining_bytes = 0;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Submit next message.
|
|
+ * lock is held
|
|
+ */
|
|
+static void svip_ssc_next_message(struct spi_master *master)
|
|
+{
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ struct spi_message *msg;
|
|
+ struct spi_device *spi;
|
|
+
|
|
+ BUG_ON(ssc_dev->current_transfer);
|
|
+
|
|
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
|
|
+ spi = msg->spi;
|
|
+
|
|
+ dev_dbg(master->dev.parent, "start message %p on %p\n", msg, spi);
|
|
+
|
|
+ /* select chip if it's not still active */
|
|
+ if (ssc_dev->stay) {
|
|
+ if (ssc_dev->stay != spi) {
|
|
+ cs_deactivate(ssc_dev, ssc_dev->stay);
|
|
+ svip_ssc_setup(spi);
|
|
+ cs_activate(ssc_dev, spi);
|
|
+ }
|
|
+ ssc_dev->stay = NULL;
|
|
+ }
|
|
+ else {
|
|
+ svip_ssc_setup(spi);
|
|
+ cs_activate(ssc_dev, spi);
|
|
+ }
|
|
+
|
|
+ svip_ssc_next_xfer(master, msg);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Report message completion.
|
|
+ * lock is held
|
|
+ */
|
|
+static void
|
|
+svip_ssc_msg_done(struct spi_master *master, struct svip_ssc_device *ssc_dev,
|
|
+ struct spi_message *msg, int status, int stay)
|
|
+{
|
|
+ if (!stay || status < 0)
|
|
+ cs_deactivate(ssc_dev, msg->spi);
|
|
+ else
|
|
+ ssc_dev->stay = msg->spi;
|
|
+
|
|
+ list_del(&msg->queue);
|
|
+ msg->status = status;
|
|
+
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "xfer complete: %u bytes transferred\n",
|
|
+ msg->actual_length);
|
|
+
|
|
+ spin_unlock(&ssc_dev->lock);
|
|
+ msg->complete(msg->context);
|
|
+ spin_lock(&ssc_dev->lock);
|
|
+
|
|
+ ssc_dev->current_transfer = NULL;
|
|
+
|
|
+ /* continue if needed */
|
|
+ if (list_empty(&ssc_dev->queue) || ssc_dev->stopping)
|
|
+ ; /* TODO: disable hardware */
|
|
+ else
|
|
+ svip_ssc_next_message(master);
|
|
+}
|
|
+
|
|
+static irqreturn_t svip_ssc_eir_handler(int irq, void *dev_id)
|
|
+{
|
|
+ struct platform_device *pdev = (struct platform_device*)dev_id;
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+
|
|
+ dev_err (&pdev->dev, "ERROR: errirq. STATE = 0x%0lx\n",
|
|
+ ssc_dev->regs->state);
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static irqreturn_t svip_ssc_rir_handler(int irq, void *dev_id)
|
|
+{
|
|
+ struct platform_device *pdev = (struct platform_device*)dev_id;
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ struct spi_message *msg;
|
|
+ struct spi_transfer *xfer;
|
|
+
|
|
+ xfer = ssc_dev->current_transfer;
|
|
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
|
|
+
|
|
+ /* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
|
|
+ * handler for spurious Interrupts!
|
|
+ */
|
|
+ if (!xfer) {
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "%s(%d): xfer = NULL\n", __FUNCTION__, irq);
|
|
+ goto out;
|
|
+ }
|
|
+ if ( !(xfer->rx_buf) ) {
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "%s(%d): xfer->rx_buf = NULL\n", __FUNCTION__, irq);
|
|
+ goto out;
|
|
+ }
|
|
+ if (ssc_dev->remaining_bytes > 0)
|
|
+ {
|
|
+ /*
|
|
+ * Keep going, we still have data to send in
|
|
+ * the current transfer.
|
|
+ */
|
|
+ svip_ssc_next_xfer(master, msg);
|
|
+ }
|
|
+
|
|
+ if (ssc_dev->remaining_bytes == 0)
|
|
+ {
|
|
+ msg->actual_length += xfer->len;
|
|
+
|
|
+ if (msg->transfers.prev == &xfer->transfer_list) {
|
|
+ /* report completed message */
|
|
+ svip_ssc_msg_done(master, ssc_dev, msg, 0,
|
|
+ xfer->cs_change);
|
|
+ }
|
|
+ else {
|
|
+ if (xfer->cs_change) {
|
|
+ cs_deactivate(ssc_dev, msg->spi);
|
|
+ udelay(1); /* not nice in interrupt context */
|
|
+ cs_activate(ssc_dev, msg->spi);
|
|
+ }
|
|
+
|
|
+ /* Not done yet. Submit the next transfer. */
|
|
+ svip_ssc_next_xfer(master, msg);
|
|
+ }
|
|
+ }
|
|
+out:
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static irqreturn_t svip_ssc_tir_handler(int irq, void *dev_id)
|
|
+{
|
|
+ struct platform_device *pdev = (struct platform_device*)dev_id;
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ struct spi_message *msg;
|
|
+ struct spi_transfer *xfer;
|
|
+ int tx_remain;
|
|
+
|
|
+ xfer = ssc_dev->current_transfer;
|
|
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
|
|
+
|
|
+ /* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
|
|
+ * handler for spurious Interrupts!
|
|
+ */
|
|
+ if (!xfer) {
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "%s(%d): xfer = NULL\n", __FUNCTION__, irq);
|
|
+ goto out;
|
|
+ }
|
|
+ if ( !(xfer->tx_buf) ) {
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__, irq);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ if (ssc_dev->remaining_bytes > 0)
|
|
+ {
|
|
+ tx_remain = xfer->len - ssc_dev->tx_bytes;
|
|
+ if ( tx_remain == 0 )
|
|
+ {
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "%s(%d): tx_remain = 0\n", __FUNCTION__, irq);
|
|
+ }
|
|
+ else
|
|
+ /*
|
|
+ * Keep going, we still have data to send in
|
|
+ * the current transfer.
|
|
+ */
|
|
+ svip_ssc_next_xfer(master, msg);
|
|
+ }
|
|
+out:
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static irqreturn_t svip_ssc_fir_handler(int irq, void *dev_id)
|
|
+{
|
|
+ struct platform_device *pdev = (struct platform_device*)dev_id;
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ struct spi_message *msg;
|
|
+ struct spi_transfer *xfer;
|
|
+
|
|
+ xfer = ssc_dev->current_transfer;
|
|
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
|
|
+
|
|
+ /* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
|
|
+ * handler for spurious Interrupts!
|
|
+ */
|
|
+ if (!xfer) {
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "%s(%d): xfer = NULL\n", __FUNCTION__, irq);
|
|
+ goto out;
|
|
+ }
|
|
+ if ( !(xfer->tx_buf) ) {
|
|
+ dev_dbg(master->dev.parent,
|
|
+ "%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__, irq);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ if (ssc_dev->remaining_bytes > 0)
|
|
+ {
|
|
+ int tx_remain = xfer->len - ssc_dev->tx_bytes;
|
|
+
|
|
+ if (tx_remain == 0)
|
|
+ {
|
|
+ /* Frame interrupt gets raised _before_ last Rx interrupt */
|
|
+ if (xfer->rx_buf)
|
|
+ {
|
|
+ svip_ssc_next_xfer(master, msg);
|
|
+ if (ssc_dev->remaining_bytes)
|
|
+ printk("expected RXTX transfer to be complete!\n");
|
|
+ }
|
|
+ ssc_dev->remaining_bytes = 0;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ ssc_dev->regs->sfcon = SSC_SFCON_PLEN_VAL(0) |
|
|
+ SSC_SFCON_DLEN_VAL(SFRAME_SIZE*8-1) |
|
|
+ SSC_SFCON_STOP |
|
|
+ SSC_SFCON_ICLK_VAL(2) |
|
|
+ SSC_SFCON_IDAT_VAL(2) |
|
|
+ SSC_SFCON_IAEN |
|
|
+ SSC_SFCON_SFEN;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ssc_dev->remaining_bytes == 0)
|
|
+ {
|
|
+ msg->actual_length += xfer->len;
|
|
+
|
|
+ if (msg->transfers.prev == &xfer->transfer_list) {
|
|
+ /* report completed message */
|
|
+ svip_ssc_msg_done(master, ssc_dev, msg, 0,
|
|
+ xfer->cs_change);
|
|
+ }
|
|
+ else {
|
|
+ if (xfer->cs_change) {
|
|
+ cs_deactivate(ssc_dev, msg->spi);
|
|
+ udelay(1); /* not nice in interrupt context */
|
|
+ cs_activate(ssc_dev, msg->spi);
|
|
+ }
|
|
+
|
|
+ /* Not done yet. Submit the next transfer. */
|
|
+ svip_ssc_next_xfer(master, msg);
|
|
+ }
|
|
+ }
|
|
+
|
|
+out:
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+/* the spi->mode bits understood by this driver: */
|
|
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP)
|
|
+
|
|
+static int svip_ssc_setup(struct spi_device *spi)
|
|
+{
|
|
+ struct spi_master *master = spi->master;
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ unsigned int bits = spi->bits_per_word;
|
|
+ unsigned int br, sck_hz = spi->max_speed_hz;
|
|
+ unsigned long flags;
|
|
+
|
|
+ if (ssc_dev->stopping)
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ if (spi->chip_select >= master->num_chipselect) {
|
|
+ dev_dbg(&spi->dev,
|
|
+ "setup: invalid chipselect %u (%u defined)\n",
|
|
+ spi->chip_select, master->num_chipselect);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (bits == 0)
|
|
+ bits = 8;
|
|
+ if (bits != 8) {
|
|
+ dev_dbg(&spi->dev,
|
|
+ "setup: invalid bits_per_word %u (expect 8)\n",
|
|
+ bits);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (spi->mode & ~MODEBITS) {
|
|
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
|
|
+ spi->mode & ~MODEBITS);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* Disable SSC */
|
|
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_CLREN;
|
|
+
|
|
+ if (sck_hz == 0)
|
|
+ sck_hz = 10000;
|
|
+
|
|
+ br = ltq_get_fbs0_hz()/(2 *sck_hz);
|
|
+ if (ltq_get_fbs0_hz()%(2 *sck_hz) == 0)
|
|
+ br = br -1;
|
|
+ ssc_dev->regs->br = br;
|
|
+
|
|
+ /* set Control Register */
|
|
+ ssc_dev->regs->mcon = SSC_MCON_ENBV |
|
|
+ SSC_MCON_RUEN |
|
|
+ SSC_MCON_TUEN |
|
|
+ SSC_MCON_AEN |
|
|
+ SSC_MCON_REN |
|
|
+ SSC_MCON_TEN |
|
|
+ (spi->mode & SPI_CPOL ? SSC_MCON_PO : 0) | /* Clock Polarity */
|
|
+ (spi->mode & SPI_CPHA ? 0 : SSC_MCON_PH) | /* Tx on trailing edge */
|
|
+ (spi->mode & SPI_LOOP ? SSC_MCON_LB : 0) | /* Loopback */
|
|
+ (spi->mode & SPI_LSB_FIRST ? 0 : SSC_MCON_HB); /* MSB first */
|
|
+ ssc_dev->bus_dir = SSC_UNDEF;
|
|
+
|
|
+ /* Enable SSC */
|
|
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_SETEN;
|
|
+ asm("sync");
|
|
+
|
|
+ spin_lock_irqsave(&ssc_dev->lock, flags);
|
|
+ if (ssc_dev->stay == spi)
|
|
+ ssc_dev->stay = NULL;
|
|
+ cs_deactivate(ssc_dev, spi);
|
|
+ spin_unlock_irqrestore(&ssc_dev->lock, flags);
|
|
+
|
|
+ dev_dbg(&spi->dev,
|
|
+ "setup: %u Hz bpw %u mode 0x%02x cs %u\n",
|
|
+ sck_hz, bits, spi->mode, spi->chip_select);
|
|
+
|
|
+ return 0;
|
|
+}
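
The divider arithmetic above is easier to follow with numbers. Assuming the usual Lantiq SSC relation SCK = f_FBS0 / (2 * (BR + 1)), which is not spelled out in this hunk, and, purely for illustration, f_FBS0 = 100 MHz:

/*
 * requested 10 MHz: 100 MHz / (2 * 10 MHz) = 5, divides evenly
 *                   -> BR = 5 - 1 = 4 -> SCK = 100 MHz / (2 * 5) = 10 MHz
 *
 * requested  8 MHz: 100 MHz / (2 * 8 MHz) = 6 (remainder 4 MHz)
 *                   -> BR = 6          -> SCK = 100 MHz / (2 * 7) ~ 7.14 MHz
 *
 * i.e. the "-1 on exact division" keeps the exact rate, otherwise the
 * clock is rounded down so it never exceeds spi->max_speed_hz.
 */
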
|
|
+
|
|
+static int svip_ssc_transfer(struct spi_device *spi, struct spi_message *msg)
|
|
+{
|
|
+ struct spi_master *master = spi->master;
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ struct spi_transfer *xfer;
|
|
+ unsigned long flags;
|
|
+
|
|
+ dev_dbg(&spi->dev, "new message %p submitted\n", msg);
|
|
+
|
|
+ if (unlikely(list_empty(&msg->transfers)
|
|
+ || !spi->max_speed_hz)) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ssc_dev->stopping)
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
|
|
+ if (!(xfer->tx_buf || xfer->rx_buf) || (xfer->len == 0)) {
|
|
+ dev_dbg(&spi->dev, "missing rx or tx buf\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* FIXME implement these protocol options!! */
|
|
+ if (xfer->bits_per_word || xfer->speed_hz) {
|
|
+ dev_dbg(&spi->dev, "no protocol options yet\n");
|
|
+ return -ENOPROTOOPT;
|
|
+ }
|
|
+
|
|
+#ifdef VERBOSE
|
|
+ dev_dbg(&spi->dev,
|
|
+ " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
|
|
+ xfer, xfer->len,
|
|
+ xfer->tx_buf, xfer->tx_dma,
|
|
+ xfer->rx_buf, xfer->rx_dma);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ msg->status = -EINPROGRESS;
|
|
+ msg->actual_length = 0;
|
|
+
|
|
+ spin_lock_irqsave(&ssc_dev->lock, flags);
|
|
+ list_add_tail(&msg->queue, &ssc_dev->queue);
|
|
+ if (!ssc_dev->current_transfer)
|
|
+ {
|
|
+ /* start transmission machine, if not started yet */
|
|
+ svip_ssc_next_message(master);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&ssc_dev->lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
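
For context, a protocol driver never calls svip_ssc_transfer() directly; it goes through the generic SPI core, which ends up in the transfer hook registered below in svip_ssc_probe(). A minimal full-duplex example using only standard 3.3-era SPI core calls:

#include <linux/spi/spi.h>

static int example_full_duplex(struct spi_device *spi)
{
	u8 tx[4] = { 0x01, 0x02, 0x03, 0x04 }, rx[4];
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len    = sizeof(tx),
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);	/* queued via the transfer hook above */
}
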
|
|
+
|
|
+static void svip_ssc_cleanup(struct spi_device *spi)
|
|
+{
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(spi->master);
|
|
+ unsigned long flags;
|
|
+
|
|
+ if (!spi->controller_state)
|
|
+ return;
|
|
+
|
|
+ spin_lock_irqsave(&ssc_dev->lock, flags);
|
|
+ if (ssc_dev->stay == spi) {
|
|
+ ssc_dev->stay = NULL;
|
|
+ cs_deactivate(ssc_dev, spi);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&ssc_dev->lock, flags);
|
|
+}
|
|
+
|
|
+/*-------------------------------------------------------------------------*/
|
|
+
|
|
+static int __init svip_ssc_probe(struct platform_device *pdev)
|
|
+{
|
|
+ int ret;
|
|
+ struct spi_master *master;
|
|
+ struct svip_ssc_device *ssc_dev;
|
|
+ struct resource *res_regs;
|
|
+ int irq;
|
|
+
|
|
+ ret = -ENOMEM;
|
|
+
|
|
+ /* setup spi core then controller-specific driver state */
|
|
+ master = spi_alloc_master(&pdev->dev, sizeof (*ssc_dev));
|
|
+ if (!master)
|
|
+ {
|
|
+ dev_err (&pdev->dev, "ERROR: no memory for master spi\n");
|
|
+ goto errout;
|
|
+ }
|
|
+
|
|
+ ssc_dev = spi_master_get_devdata(master);
|
|
+ platform_set_drvdata(pdev, master);
|
|
+
|
|
+ master->bus_num = pdev->id;
|
|
+ master->num_chipselect = 8;
|
|
+ master->mode_bits = MODEBITS;
|
|
+ master->setup = svip_ssc_setup;
|
|
+ master->transfer = svip_ssc_transfer;
|
|
+ master->cleanup = svip_ssc_cleanup;
|
|
+
|
|
+ spin_lock_init(&ssc_dev->lock);
|
|
+ INIT_LIST_HEAD(&ssc_dev->queue);
|
|
+
|
|
+ /* retrieve register configuration */
|
|
+ res_regs = platform_get_resource_byname (pdev, IORESOURCE_MEM, "regs");
|
|
+ if (NULL == res_regs)
|
|
+ {
|
|
+ dev_err (&pdev->dev, "ERROR: missed 'regs' resource\n");
|
|
+ goto spierr;
|
|
+ }
|
|
+
|
|
+ ssc_dev->regs = (struct svip_reg_ssc*)KSEG1ADDR(res_regs->start);
|
|
+
|
|
+ irq = platform_get_irq_byname (pdev, "tx");
|
|
+ if (irq < 0)
|
|
+ goto irqerr;
|
|
+ sprintf(ssc_dev->intname[0], "%s_tx", pdev->name);
|
|
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_tir_handler,
|
|
+ IRQF_DISABLED, ssc_dev->intname[0], pdev);
|
|
+ if (ret != 0)
|
|
+ goto irqerr;
|
|
+
|
|
+ irq = platform_get_irq_byname (pdev, "rx");
|
|
+ if (irq < 0)
|
|
+ goto irqerr;
|
|
+ sprintf(ssc_dev->intname[1], "%s_rx", pdev->name);
|
|
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_rir_handler,
|
|
+ IRQF_DISABLED, ssc_dev->intname[1], pdev);
|
|
+ if (ret != 0)
|
|
+ goto irqerr;
|
|
+
|
|
+ irq = platform_get_irq_byname (pdev, "err");
|
|
+ if (irq < 0)
|
|
+ goto irqerr;
|
|
+ sprintf(ssc_dev->intname[2], "%s_err", pdev->name);
|
|
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_eir_handler,
|
|
+ IRQF_DISABLED, ssc_dev->intname[2], pdev);
|
|
+ if (ret != 0)
|
|
+ goto irqerr;
|
|
+
|
|
+ irq = platform_get_irq_byname (pdev, "frm");
|
|
+ if (irq < 0)
|
|
+ goto irqerr;
|
|
+ sprintf(ssc_dev->intname[3], "%s_frm", pdev->name);
|
|
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_fir_handler,
|
|
+ IRQF_DISABLED, ssc_dev->intname[3], pdev);
|
|
+ if (ret != 0)
|
|
+ goto irqerr;
|
|
+
|
|
+ /*
|
|
+ * Initialize the Hardware
|
|
+ */
|
|
+
|
|
+ /* Clear enable bit, i.e. put SSC into configuration mode */
|
|
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_CLREN;
|
|
+ /* enable SSC core to run at fpi clock */
|
|
+ ssc_dev->regs->clc = SSC_CLC_RMC_VAL(1);
|
|
+ asm("sync");
|
|
+
|
|
+ /* GPIO CS */
|
|
+ ssc_dev->regs->gpocon = SSC_GPOCON_ISCSBN_VAL(0xFF);
|
|
+ ssc_dev->regs->whbgpostat = SSC_WHBGPOSTAT_SETOUTN_VAL(0xFF); /* CS to high */
|
|
+
|
|
+ /* Set Master mode */
|
|
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_SETMS;
|
|
+
|
|
+ /* enable and flush RX/TX FIFO */
|
|
+ ssc_dev->regs->rxfcon = SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS-FIFO_HEADROOM) |
|
|
+ SSC_RXFCON_RXFLU | /* Receive FIFO Flush */
|
|
+ SSC_RXFCON_RXFEN; /* Receive FIFO Enable */
|
|
+
|
|
+ ssc_dev->regs->txfcon = SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM) |
|
|
+ SSC_TXFCON_TXFLU | /* Transmit FIFO Flush */
|
|
+ SSC_TXFCON_TXFEN; /* Transmit FIFO Enable */
|
|
+ asm("sync");
|
|
+
|
|
+ /* enable IRQ */
|
|
+ ssc_dev->regs->irnen = SSC_IRNEN_E;
|
|
+
|
|
+ dev_info(&pdev->dev, "controller at 0x%08lx (irq %d)\n",
|
|
+ (unsigned long)ssc_dev->regs, platform_get_irq_byname (pdev, "rx"));
|
|
+
|
|
+ ret = spi_register_master(master);
|
|
+ if (ret)
|
|
+ goto out_reset_hw;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+out_reset_hw:
|
|
+
|
|
+irqerr:
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "tx"), pdev);
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "rx"), pdev);
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "err"), pdev);
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "frm"), pdev);
|
|
+
|
|
+spierr:
|
|
+
|
|
+ spi_master_put(master);
|
|
+
|
|
+errout:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int __exit svip_ssc_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+ struct spi_message *msg;
|
|
+
|
|
+ /* reset the hardware and block queue progress */
|
|
+ spin_lock_irq(&ssc_dev->lock);
|
|
+ ssc_dev->stopping = 1;
|
|
+ /* TODO: shutdown hardware */
|
|
+ spin_unlock_irq(&ssc_dev->lock);
|
|
+
|
|
+ /* Terminate remaining queued transfers */
|
|
+ list_for_each_entry(msg, &ssc_dev->queue, queue) {
|
|
+ /* REVISIT unmapping the dma is a NOP on ARM and AVR32
|
|
+ * but we shouldn't depend on that...
|
|
+ */
|
|
+ msg->status = -ESHUTDOWN;
|
|
+ msg->complete(msg->context);
|
|
+ }
|
|
+
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "tx"), pdev);
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "rx"), pdev);
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "err"), pdev);
|
|
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "frm"), pdev);
|
|
+
|
|
+ spi_unregister_master(master);
|
|
+ platform_set_drvdata(pdev, NULL);
|
|
+ spi_master_put(master);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int svip_ssc_suspend(struct platform_device *pdev, pm_message_t mesg)
|
|
+{
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+
|
|
+ clk_disable(ssc_dev->clk);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int svip_ssc_resume(struct platform_device *pdev)
|
|
+{
|
|
+ struct spi_master *master = platform_get_drvdata(pdev);
|
|
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
|
|
+
|
|
+ clk_enable(ssc_dev->clk);
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static struct platform_driver svip_ssc_driver = {
|
|
+ .driver = {
|
|
+ .name = "ifx_ssc",
|
|
+ .owner = THIS_MODULE,
|
|
+ },
|
|
+ .probe = svip_ssc_probe,
|
|
+#ifdef CONFIG_PM
|
|
+ .suspend = svip_ssc_suspend,
|
|
+ .resume = svip_ssc_resume,
|
|
+#endif
|
|
+ .remove = __exit_p(svip_ssc_remove)
|
|
+};
|
|
+
|
|
+int __init svip_ssc_init(void)
|
|
+{
|
|
+ return platform_driver_register(&svip_ssc_driver);
|
|
+}
|
|
+
|
|
+void __exit svip_ssc_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&svip_ssc_driver);
|
|
+}
|
|
+
|
|
+module_init(svip_ssc_init);
|
|
+module_exit(svip_ssc_exit);
|
|
+
|
|
+MODULE_ALIAS("platform:ifx_ssc");
|
|
+MODULE_DESCRIPTION("Lantiq SSC Controller driver");
|
|
+MODULE_AUTHOR("Andreas Schmidt <andreas.schmidt@infineon.com>");
|
|
+MODULE_AUTHOR("Jevgenijs Grigorjevs <Jevgenijs.Grigorjevs@lantiq.com>");
|
|
+MODULE_LICENSE("GPL");
|
|
Index: linux-3.3.8/net/ipv4/svip_nat.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/net/ipv4/svip_nat.c 2012-07-31 20:04:32.985139213 +0200
|
|
@@ -0,0 +1,1569 @@
|
|
+/******************************************************************************
|
|
+
|
|
+ Copyright (c) 2009
|
|
+ Lantiq Deutschland GmbH
|
|
+ Am Campeon 3; 81726 Munich, Germany
|
|
+
|
|
+ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE,
|
|
+ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS
|
|
+ SOFTWARE IS FREE OF CHARGE.
|
|
+
|
|
+ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS
|
|
+ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING
|
|
+ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP,
|
|
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE
|
|
+ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD
|
|
+ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL
|
|
+ PROPERTY INFRINGEMENT.
|
|
+
|
|
+ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT
|
|
+ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM
|
|
+ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
+ DEALINGS IN THE SOFTWARE.
|
|
+
|
|
+ ****************************************************************************
|
|
+
|
|
+Description : This file contains implementation of Custom NAT function
|
|
+for Infineon's VINETIC-SVIP16
|
|
+ *******************************************************************************/
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/netfilter_ipv4.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/inetdevice.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/ip.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/udp.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <linux/in6.h> /* just to shut up a warning */
|
|
+#include <linux/miscdevice.h>
|
|
+#include <asm/checksum.h>
|
|
+
|
|
+#include <linux/svip_nat.h>
|
|
+
|
|
+MODULE_AUTHOR("Lantiq Deutschland GmbH");
|
|
+MODULE_DESCRIPTION("SVIP Network Address Translation module");
|
|
+MODULE_LICENSE("GPL");
|
|
+
|
|
+#define SVIP_NAT_INFO_STR "@(#)SVIP NAT, version "SVIP_NAT_VERSION
|
|
+
|
|
+/** maximum voice packet channels possible on the SVIP LC system
|
|
+ (equals maximum number of Codec channels possible) */
|
|
+#define SVIP_SYS_CODEC_NUM ((SVIP_SYS_NUM) * (SVIP_CODEC_NUM))
|
|
+
|
|
+/** end UDP port number of the SVIP Linecard System */
|
|
+#define SVIP_UDP_TO ((SVIP_UDP_FROM) + (SVIP_SYS_CODEC_NUM) - 1)
|
|
+
|
|
+/** end UDP port number of the Master SVIP in SVIP Linecard System */
|
|
+#define SVIP_UDP_TO_VOFW0 ((SVIP_UDP_FROM) + (SVIP_CODEC_NUM) - 1)
|
|
+
|
|
+#define SVIP_PORT_INRANGE(nPort) \
|
|
+ ((nPort) >= (SVIP_UDP_FROM) && (nPort) <= (SVIP_UDP_TO))
|
|
+
|
|
+#define SVIP_PORT_INDEX(nPort) (nPort - SVIP_UDP_FROM)
|
|
+
|
|
+#define SVIP_NET_DEV_ETH0_IDX 0
|
|
+#define SVIP_NET_DEV_VETH0_IDX 1
|
|
+#define SVIP_NET_DEV_LO_IDX 2
|
|
+
|
|
+#define SVIP_NET_DEV_ETH0_NAME "eth0"
|
|
+#define SVIP_NET_DEV_ETH1_NAME "eth1"
|
|
+#define SVIP_NET_DEV_VETH1_NAME "veth0"
|
|
+#define SVIP_NET_DEV_LO_NAME "lo"
|
|
+
|
|
+#define SVIP_NAT_STATS_LOC2REM 0
|
|
+#define SVIP_NAT_STATS_REM2LOC 1
|
|
+#define SVIP_NAT_STATS_TYPES 2
|
|
+
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
|
|
+#define SVIP_NAT_FOR_EACH_NETDEV(d) for_each_netdev(&init_net, dev)
|
|
+#define SVIP_NAT_IP_HDR(ethhdr) ip_hdr(ethhdr)
|
|
+#else
|
|
+#define SVIP_NAT_FOR_EACH_NETDEV(d) for(d=dev_base; dev; dev = dev->next)
|
|
+#define SVIP_NAT_IP_HDR(ethhdr) (ethhdr)->nh.iph
|
|
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+#define SVIP_NAT_SKB_MAC_HEADER(ethhdr) (ethhdr)->mac.ethernet
|
|
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
|
|
+#define SVIP_NAT_SKB_MAC_HEADER(ethhdr) (ethhdr)->mac.raw
|
|
+#else
|
|
+#define SVIP_NAT_SKB_MAC_HEADER(ethhdr) skb_mac_header(ethhdr)
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
|
|
+#define VLAN_DEV_REAL_DEV(dev) vlan_dev_real_dev(dev)
|
|
+#define VLAN_DEV_VLAN_ID(dev) vlan_dev_vlan_id(dev)
|
|
+#else
|
|
+#define VLAN_DEV_REAL_DEV(dev) (VLAN_DEV_INFO(dev)->real_dev)
|
|
+#define VLAN_DEV_VLAN_ID(dev) (VLAN_DEV_INFO(dev)->vlan_id)
|
|
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */
|
|
+
|
|
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
|
|
+#define MOD_INC_USE_COUNT
|
|
+#define MOD_DEC_USE_COUNT
|
|
+#endif
|
|
+
|
|
+#if ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) && \
|
|
+ (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)))
|
|
+#define VLAN_8021Q_UNUSED
|
|
+#endif
|
|
+
|
|
+
|
|
+extern spinlock_t vlan_group_lock;
|
|
+extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, unsigned short VID);
|
|
+
|
|
+typedef struct SVIP_NAT_stats
|
|
+{
|
|
+ unsigned long inPackets;
|
|
+ unsigned long outPackets;
|
|
+ unsigned long outErrors;
|
|
+} SVIP_NAT_stats_t;
|
|
+
|
|
+typedef struct SVIP_NAT_table_entry
|
|
+{
|
|
+ SVIP_NAT_IO_Rule_t natRule;
|
|
+ SVIP_NAT_stats_t natStats[SVIP_NAT_STATS_TYPES];
|
|
+} SVIP_NAT_table_entry_t;
|
|
+
|
|
+/* pointer to the SVIP NAT table */
|
|
+static SVIP_NAT_table_entry_t *pNatTable = NULL;
|
|
+
|
|
+struct net_device *net_devs[3];
|
|
+static u32 *paddr_eth0;
|
|
+static u32 *paddr_eth0_0;
|
|
+static u32 *paddr_veth0;
|
|
+static u32 *pmask_veth0;
|
|
+
|
|
+static struct semaphore *sem_nat_tbl_access;
|
|
+static int proc_read_in_progress = 0;
|
|
+
|
|
+static int nDeviceOpen = 0;
|
|
+
|
|
+/* saves the NAT table index between subsequent invocation */
|
|
+static int nProcReadIdx = 0;
|
|
+
|
|
+static long SVIP_NAT_device_ioctl(struct file *,unsigned int ,unsigned long);
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
|
|
+static int SVIP_NAT_device_release (struct inode *,struct file *);
|
|
+#else
|
|
+static void SVIP_NAT_device_release (struct inode *,struct file *);
|
|
+#endif
|
|
+static int SVIP_NAT_device_open (struct inode *,struct file *);
|
|
+
|
|
+/* This structure holds the interface functions supported by
|
|
+ the SVIP NAT configuration device. */
|
|
+struct file_operations SVIP_NAT_Fops = {
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
|
|
+owner: THIS_MODULE,
|
|
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) */
|
|
+ llseek: NULL, /* seek */
|
|
+ read: NULL,
|
|
+ write: NULL,
|
|
+ readdir: NULL, /* readdir */
|
|
+ poll: NULL, /* select */
|
|
+ unlocked_ioctl: SVIP_NAT_device_ioctl, /* ioctl */
|
|
+ mmap: NULL, /* mmap */
|
|
+ open: SVIP_NAT_device_open, /* open, */
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
|
|
+ flush: NULL, /* flush */
|
|
+#endif
|
|
+ release: SVIP_NAT_device_release /* close */
|
|
+};
|
|
+
|
|
+/** Structure holding MISC module operations */
|
|
+static struct miscdevice SVIP_NAT_miscdev =
|
|
+{
|
|
+minor: MINOR_NUM_SVIP_NAT,
|
|
+ name: SVIP_NAT_DEVICE_NAME,
|
|
+ fops: &SVIP_NAT_Fops
|
|
+};
|
|
+
|
|
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
|
|
+int nSVIP_NAT_Sniffer;
|
|
+unsigned char pSVIP_NAT_SnifferMAC[ETH_ALEN];
|
|
+int nSVIP_NAT_SnifferMacSet;
|
|
+#endif
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ Function to read /proc/net/svip_nat/nat proc entry
|
|
+
|
|
+ \arguments
|
|
+ page - pointer to page buffer
|
|
+ start - pointer to start address pointer
|
|
+ off - offset
|
|
+ count - maximum data length to read
|
|
+ eof - end of file flag
|
|
+ data - proc read data (provided by the function
|
|
+ pointed to by data)
|
|
+
|
|
+ \return
|
|
+ length of read data
|
|
+
|
|
+ \remarks:
|
|
+ Each call of this routine forces a copy_to_user of the data returned by
|
|
+ 'fn'. This routine will be called by the user until 'len = 0'.
|
|
+ ****************************************************************************/
|
|
+static int SVIP_NAT_ProcRead (char *page, char **start, off_t off,
|
|
+ int count, int *eof, void *data)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ int (*fn)(char *buf, int size);
|
|
+ int len;
|
|
+
|
|
+ /* If the NAT table index is negative, the reading has completed */
|
|
+ if (nProcReadIdx < 0)
|
|
+ {
|
|
+ nProcReadIdx = 0;
|
|
+ *eof = 1;
|
|
+ proc_read_in_progress = 0;
|
|
+ up(sem_nat_tbl_access);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ local_irq_save(flags);
|
|
+ if (!proc_read_in_progress)
|
|
+ {
|
|
+ proc_read_in_progress = 1;
|
|
+ local_irq_restore(flags);
|
|
+ /* we use this semaphore in order to ensure no other party(could be ioctl
|
|
+ FIO_SVIP_NAT_RULE_LIST), uses function SVIP_NAT_ProcReadNAT(), during
|
|
+ the time read of the proc file takes place */
|
|
+ down(sem_nat_tbl_access);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ local_irq_restore(flags);
|
|
+ }
|
|
+
|
|
+ if (data != NULL)
|
|
+ {
|
|
+ fn = data;
|
|
+ len = fn (page, count);
|
|
+ /* In this setup each read of the proc entries returns the read data by
|
|
+ 'fn' to the user. The user keeps issuing read requests as long as the
|
|
+ returned value of 'len' is greater than zero. */
|
|
+ *eof = 1;
|
|
+ *start = page;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ len = 0;
|
|
+ }
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
|
|
+/**
|
|
+ Function to read remaining proc entries
|
|
+ */
|
|
+static int SVIP_NAT_ProcReadGen (char *page, char **start, off_t off,
|
|
+ int count, int *eof, void *data)
|
|
+{
|
|
+ int (*fn)(char *buf, int size);
|
|
+ int len = 0;
|
|
+
|
|
+ MOD_INC_USE_COUNT;
|
|
+
|
|
+ if (data == NULL)
|
|
+ {
|
|
+ MOD_DEC_USE_COUNT;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ fn = data;
|
|
+ len = fn (page, count);
|
|
+
|
|
+ if (len <= off + count)
|
|
+ {
|
|
+ *eof = 1;
|
|
+ }
|
|
+ *start = page + off;
|
|
+ len -= off;
|
|
+ if (len > count)
|
|
+ {
|
|
+ len = count;
|
|
+ }
|
|
+ if (len < 0)
|
|
+ {
|
|
+ len = 0;
|
|
+ }
|
|
+
|
|
+ MOD_DEC_USE_COUNT;
|
|
+
|
|
+ return len;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ Function for setting up /proc/net/svip_nat read data
|
|
+
|
|
+ \arguments
|
|
+ buf - pointer to read buffer
|
|
+ count - size of read buffer
|
|
+
|
|
+ \return
|
|
+ length of read data into buffer
|
|
+
|
|
+ \remarks:
|
|
+ The global variable 'nProcReadIdx' is used to save the table index where
|
|
+ the reading of the NAT table stopped. Reading is stopped when the end of
|
|
+ the read buffer is approached. On the next iteration the reading continues
|
|
+ from the saved index.
|
|
+ *******************************************************************************/
|
|
+static int SVIP_NAT_ProcReadNAT(char *buf, int count)
|
|
+{
|
|
+ int i, j;
|
|
+ int len = 0;
|
|
+ SVIP_NAT_IO_Rule_t *pNatRule;
|
|
+
|
|
+ if (nProcReadIdx == -1)
|
|
+ {
|
|
+ nProcReadIdx = 0;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (nProcReadIdx == 0)
|
|
+ {
|
|
+ len = sprintf(buf+len,
|
|
+ "Remote host IP " /* 16 char */
|
|
+ "Remote host MAC " /* 19 char */
|
|
+ "Local host IP " /* 15 char */
|
|
+ "Local host MAC " /* 19 char */
|
|
+ "Local host UDP " /* 16 char */
|
|
+ "Loc->Rem(in/out/err) " /* 22 char */
|
|
+ "Rem->Loc(in/out/err)\n\r");
|
|
+ }
|
|
+
|
|
+ for (i = nProcReadIdx; i < SVIP_SYS_CODEC_NUM; i++)
|
|
+ {
|
|
+ int slen;
|
|
+
|
|
+ pNatRule = &pNatTable[i].natRule;
|
|
+
|
|
+ if (pNatRule->remIP != 0)
|
|
+ {
|
|
+ /* make sure not to overwrite the buffer */
|
|
+ if (count < len+120)
|
|
+ break;
|
|
+
|
|
+ /* remIP */
|
|
+ slen = sprintf(buf+len, "%d.%d.%d.%d",
|
|
+ (int)((pNatRule->remIP >> 24) & 0xff),
|
|
+ (int)((pNatRule->remIP >> 16) & 0xff),
|
|
+ (int)((pNatRule->remIP >> 8) & 0xff),
|
|
+ (int)((pNatRule->remIP >> 0) & 0xff));
|
|
+ len += slen;
|
|
+ for (j = 0; j < (16-slen); j++)
|
|
+ len += sprintf(buf+len, " ");
|
|
+
|
|
+ /* remMAC */
|
|
+ slen = 0;
|
|
+ for (j = 0; j < ETH_ALEN; j++)
|
|
+ {
|
|
+ slen += sprintf(buf+len+slen, "%02x%s",
|
|
+ pNatRule->remMAC[j], j < ETH_ALEN-1 ? ":" : " ");
|
|
+ }
|
|
+ len += slen;
|
|
+ for (j = 0; j < (19-slen); j++)
|
|
+ len += sprintf(buf+len, " ");
|
|
+
|
|
+ /* locIP */
|
|
+ slen = sprintf(buf+len, "%d.%d.%d.%d",
|
|
+ (int)((pNatRule->locIP >> 24) & 0xff),
|
|
+ (int)((pNatRule->locIP >> 16) & 0xff),
|
|
+ (int)((pNatRule->locIP >> 8) & 0xff),
|
|
+ (int)((pNatRule->locIP >> 0) & 0xff));
|
|
+ len += slen;
|
|
+ for (j = 0; j < (15-slen); j++)
|
|
+ len += sprintf(buf+len, " ");
|
|
+
|
|
+ /* locMAC */
|
|
+ slen = 0;
|
|
+ for (j = 0; j < ETH_ALEN; j++)
|
|
+ {
|
|
+ slen += sprintf(buf+len+slen, "%02x%s",
|
|
+ pNatRule->locMAC[j], j < ETH_ALEN-1 ? ":" : " ");
|
|
+ }
|
|
+ len += slen;
|
|
+ for (j = 0; j < (19-slen); j++)
|
|
+ len += sprintf(buf+len, " ");
|
|
+
|
|
+ /* locUDP */
|
|
+ slen = sprintf(buf+len, "%d", pNatRule->locUDP);
|
|
+ len += slen;
|
|
+ for (j = 0; j < (16-slen); j++)
|
|
+ len += sprintf(buf+len, " ");
|
|
+
|
|
+ /* NAT statistics, Local to Remote translation */
|
|
+ slen = sprintf(buf+len, "(%ld/%ld/%ld)",
|
|
+ pNatTable[i].natStats[SVIP_NAT_STATS_LOC2REM].inPackets,
|
|
+ pNatTable[i].natStats[SVIP_NAT_STATS_LOC2REM].outPackets,
|
|
+ pNatTable[i].natStats[SVIP_NAT_STATS_LOC2REM].outErrors);
|
|
+ len += slen;
|
|
+ for (j = 0; j < (22-slen); j++)
|
|
+ len += sprintf(buf+len, " ");
|
|
+
|
|
+ /* NAT statistics, Remote to Local translation */
|
|
+ len += sprintf(buf+len, "(%ld/%ld/%ld)\n\r",
|
|
+ pNatTable[i].natStats[SVIP_NAT_STATS_REM2LOC].inPackets,
|
|
+ pNatTable[i].natStats[SVIP_NAT_STATS_REM2LOC].outPackets,
|
|
+ pNatTable[i].natStats[SVIP_NAT_STATS_REM2LOC].outErrors);
|
|
+ }
|
|
+ }
|
|
+ if (i == SVIP_SYS_CODEC_NUM)
|
|
+ nProcReadIdx = -1; /* reading completed */
|
|
+ else
|
|
+ nProcReadIdx = i; /* reading still in process, buffer was full */
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
|
|
+/**
|
|
+ Converts a MAC address from ASCII to hex representation
|
|
+ */
|
|
+static int SVIP_NAT_MacAsciiToHex(const char *pMacStr, unsigned char *pMacHex)
|
|
+{
|
|
+ int i=0, c=0, b=0, n=0;
|
|
+
|
|
+ memset(pMacHex, 0, ETH_ALEN);
|
|
+ while (pMacStr[i] != '\0')
|
|
+ {
|
|
+ if (n >= 0)
|
|
+ {
|
|
+ unsigned char nToHex = 0;
|
|
+
|
|
+ /* check for hex digit */
|
|
+ if (pMacStr[i] >= '0' && pMacStr[i] <= '9')
|
|
+ nToHex = 0x30;
|
|
+ else if (pMacStr[i] >= 'a' && pMacStr[i] <= 'f')
|
|
+ nToHex = 0x57;
|
|
+ else if (pMacStr[i] >= 'A' && pMacStr[i] <= 'F')
|
|
+ nToHex = 0x37;
|
|
+ else
|
|
+ {
|
|
+ if (n != 0)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: invalid MAC address format[%s]\n", pMacStr);
|
|
+ return -1;
|
|
+ }
|
|
+ i++;
|
|
+ continue;
|
|
+ }
|
|
+ n^=1;
|
|
+ pMacHex[b] |= ((pMacStr[i] - nToHex)&0xf) << (4*n);
|
|
+ if (n == 0)
|
|
+ {
|
|
+ /* advance to next byte, check if complete */
|
|
+ if (++b >= ETH_ALEN)
|
|
+ return 0;
|
|
+ /* byte completed, next we expect a colon... */
|
|
+ c = 1;
|
|
+ /* and, do not check for hex digit */
|
|
+ n = -1;
|
|
+ }
|
|
+ i++;
|
|
+ continue;
|
|
+ }
|
|
+ if (c == 1)
|
|
+ {
|
|
+ if (pMacStr[i] == ':')
|
|
+ {
|
|
+ /* next we expect hex digit, again */
|
|
+ n = 0;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: invalid MAC address format[%s]\n", pMacStr);
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+ i++;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ Used to set the destination MAC address of a host where incoming
|
|
+ SVIP VoFW packets are to be addressed. In case the address is set
|
|
+ to 00:00:00:00:00:00 (the default case), the packets will be written
|
|
+ out to eth0 with their original MAC address.
|
|
+
|
|
+ \remark
|
|
+usage: 'echo "00:03:19:00:15:D1" > cat /proc/net/svip_nat/snifferMAC'
|
|
+*/
|
|
+int SVIP_NAT_ProcWriteSnifferMAC (struct file *file, const char *buffer,
|
|
+ unsigned long count, void *data)
|
|
+{
|
|
+ /* at least strlen("xx:xx:xx:xx:xx:xx") characters, followed by '\0' */
|
|
+ if (count >= 18)
|
|
+ {
|
|
+ int ret;
|
|
+
|
|
+ ret = SVIP_NAT_MacAsciiToHex(buffer, pSVIP_NAT_SnifferMAC);
|
|
+
|
|
+ if (ret != 0)
|
|
+ return 0;
|
|
+
|
|
+ if (!(pSVIP_NAT_SnifferMAC[0]==0 && pSVIP_NAT_SnifferMAC[1]==0 &&
|
|
+ pSVIP_NAT_SnifferMAC[2]==0 && pSVIP_NAT_SnifferMAC[3]==0 &&
|
|
+ pSVIP_NAT_SnifferMAC[4]==0 && pSVIP_NAT_SnifferMAC[5]==0))
|
|
+ {
|
|
+ nSVIP_NAT_SnifferMacSet = 1;
|
|
+ }
|
|
+ }
|
|
+ return count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ Used to read the destination MAC address of a sniffer host
|
|
+ */
|
|
+int SVIP_NAT_ProcReadSnifferMAC (char *buf, int count)
|
|
+{
|
|
+ int len = 0;
|
|
+
|
|
+ len = snprintf(buf, count, "%02x:%02x:%02x:%02x:%02x:%02x\n",
|
|
+ pSVIP_NAT_SnifferMAC[0], pSVIP_NAT_SnifferMAC[1],
|
|
+ pSVIP_NAT_SnifferMAC[2], pSVIP_NAT_SnifferMAC[3],
|
|
+ pSVIP_NAT_SnifferMAC[4], pSVIP_NAT_SnifferMAC[5]);
|
|
+
|
|
+ if (len > count)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: Only part of the text could be put into the buffer\n");
|
|
+ return count;
|
|
+ }
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+/**
|
|
+ Used to switch VoFW message sniffer on/off
|
|
+
|
|
+ \remark
|
|
+usage: 'echo "1" > cat /proc/net/svip_nat/snifferOnOff'
|
|
+*/
|
|
+int SVIP_NAT_ProcWriteSnifferOnOff (struct file *file, const char *buffer,
|
|
+ unsigned long count, void *data)
|
|
+{
|
|
+ /* at least one digit expected, followed by '\0' */
|
|
+ if (count >= 2)
|
|
+ {
|
|
+ int ret, nSnifferOnOff;
|
|
+
|
|
+ ret = sscanf(buffer, "%d", &nSnifferOnOff);
|
|
+
|
|
+ if (ret != 1)
|
|
+ return count;
|
|
+
|
|
+ if (nSnifferOnOff > 0)
|
|
+ nSnifferOnOff = 1;
|
|
+
|
|
+ nSVIP_NAT_Sniffer = nSnifferOnOff;
|
|
+ }
|
|
+ return count;
|
|
+}
|
|
+
|
|
+/**
|
|
+ Used to read the VoFW message sniffer configuration (on/off)
|
|
+ */
|
|
+int SVIP_NAT_ProcReadSnifferOnOff (char *buf, int count)
|
|
+{
|
|
+ int len = 0;
|
|
+
|
|
+ len = snprintf(buf, count, "%d\n", nSVIP_NAT_Sniffer);
|
|
+
|
|
+ if (len > count)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: Only part of the text could be put into the buffer\n");
|
|
+ return count;
|
|
+ }
|
|
+
|
|
+ return len;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ Creates proc read/write entries
|
|
+
|
|
+ \return
|
|
+ 0 on success, -1 on error
|
|
+ */
|
|
+/******************************************************************************/
|
|
+static int SVIP_NAT_ProcInstall(void)
|
|
+{
|
|
+ struct proc_dir_entry *pProcParentDir, *pProcDir;
|
|
+ struct proc_dir_entry *pProcNode;
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
|
|
+ pProcParentDir = proc_net;
|
|
+#else
|
|
+ pProcParentDir = init_net.proc_net;
|
|
+#endif
|
|
+ pProcDir = proc_mkdir(SVIP_NAT_DEVICE_NAME, pProcParentDir);
|
|
+ if (pProcDir == NULL)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: cannot create proc dir %s/%s\n\r",
|
|
+ pProcParentDir->name, SVIP_NAT_DEVICE_NAME);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ pProcNode = create_proc_read_entry("nat", S_IFREG|S_IRUGO, pProcDir,
|
|
+ SVIP_NAT_ProcRead, (void *)SVIP_NAT_ProcReadNAT);
|
|
+ if (pProcNode == NULL)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: cannot create proc entry %s/%s",
|
|
+ pProcDir->name, "nat");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
|
|
+ nSVIP_NAT_Sniffer = 0;
|
|
+ /* creates proc entry for switching on/off sniffer to VoFW messages */
|
|
+ pProcNode = create_proc_read_entry("snifferOnOff", S_IFREG|S_IRUGO|S_IWUGO,
|
|
+ pProcDir, SVIP_NAT_ProcReadGen, (void *)SVIP_NAT_ProcReadSnifferOnOff);
|
|
+ if (pProcNode == NULL)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: cannot create proc entry %s/%s\n\r",
|
|
+ pProcDir->name, "snifferOnOff");
|
|
+ return -1;
|
|
+ }
|
|
+ pProcNode->write_proc = SVIP_NAT_ProcWriteSnifferOnOff;
|
|
+
|
|
+ memset (pSVIP_NAT_SnifferMAC, 0, ETH_ALEN);
|
|
+ nSVIP_NAT_SnifferMacSet = 0;
|
|
+ /* creates proc entry for setting MAC address of sniffer host to VoFW messages */
|
|
+ pProcNode = create_proc_read_entry("snifferMAC", S_IFREG|S_IRUGO|S_IWUGO,
|
|
+ pProcDir, SVIP_NAT_ProcReadGen, (void *)SVIP_NAT_ProcReadSnifferMAC);
|
|
+ if (pProcNode == NULL)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: cannot create proc entry %s/%s\n\r",
|
|
+ pProcDir->name, "snifferMAC");
|
|
+ return -1;
|
|
+ }
|
|
+ pProcNode->write_proc = SVIP_NAT_ProcWriteSnifferMAC;
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+}
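The entries registered above form a small /proc interface. A minimal userspace sketch (illustrative only, assuming the directory name "svip_nat" implied by the usage notes in this file) that dumps the NAT table and enables the VoFW sniffer:

/* Illustrative userspace sketch: read /proc/net/svip_nat/nat in chunks
 * (the chunking is handled kernel-side via nProcReadIdx) and switch the
 * sniffer on. The sniffer entry only exists with CONFIG_SVIP_FW_PKT_SNIFFER. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/svip_nat/nat", "r");

	if (!f) {
		perror("open NAT table");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	f = fopen("/proc/net/svip_nat/snifferOnOff", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}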
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ No actions done here, simply a check is performed if an open has already
|
|
+ been performed. Currently only a single open is allowed, as it is sufficient
|
|
+ to have a single process configuring the SVIP NAT at a time.
|
|
+
|
|
+ \arguments
|
|
+ inode - pointer to disk file data
|
|
+ file - pointer to device file data
|
|
+
|
|
+ \return
|
|
+ 0 on success, else -1
|
|
+ */
|
|
+/******************************************************************************/
|
|
+static int SVIP_NAT_device_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ struct in_device *in_dev;
|
|
+ struct in_ifaddr *ifa;
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+ local_irq_save(flags);
|
|
+#else
|
|
+ local_save_flags(flags);
|
|
+#endif
|
|
+
|
|
+ if (nDeviceOpen)
|
|
+ {
|
|
+ MOD_INC_USE_COUNT;
|
|
+ local_irq_restore(flags);
|
|
+ nDeviceOpen++;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* find pointer to IP address of eth0 */
|
|
+ if ((in_dev=in_dev_get(net_devs[SVIP_NET_DEV_ETH0_IDX])) != NULL)
|
|
+ {
|
|
+ for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next)
|
|
+ {
|
|
+ if (!paddr_eth0 && ifa->ifa_address != 0)
|
|
+ {
|
|
+ paddr_eth0 = &ifa->ifa_address;
|
|
+ continue;
|
|
+ }
|
|
+ if (paddr_eth0 && ifa->ifa_address != 0)
|
|
+ {
|
|
+ paddr_eth0_0 = &ifa->ifa_address;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ in_dev_put(in_dev);
|
|
+ }
|
|
+ if (paddr_eth0 == NULL || paddr_eth0_0 == NULL)
|
|
+ {
|
|
+ local_irq_restore(flags);
|
|
+ return -ENODATA;
|
|
+ }
|
|
+
|
|
+ /* find pointer to IP address of veth0 */
|
|
+ if ((in_dev=in_dev_get(net_devs[SVIP_NET_DEV_VETH0_IDX])) != NULL)
|
|
+ {
|
|
+ for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next)
|
|
+ {
|
|
+ if (ifa->ifa_address != 0)
|
|
+ {
|
|
+ paddr_veth0 = &ifa->ifa_address;
|
|
+ pmask_veth0 = &ifa->ifa_mask;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ in_dev_put(in_dev);
|
|
+ }
|
|
+ if (paddr_veth0 == NULL)
|
|
+ {
|
|
+ local_irq_restore(flags);
|
|
+ return -ENODATA;
|
|
+ }
|
|
+
|
|
+ MOD_INC_USE_COUNT;
|
|
+ nDeviceOpen++;
|
|
+ local_irq_restore(flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ This function is called when a process closes the SVIP NAT device file
|
|
+
|
|
+ \arguments
|
|
+ inode - pointer to disk file data
|
|
+ file - pointer to device file data
|
|
+
|
|
+ \return
|
|
+ 0 on success, else -1
|
|
+
|
|
+*/
|
|
+/******************************************************************************/
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
|
|
+static int SVIP_NAT_device_release(struct inode *inode,
|
|
+ struct file *file)
|
|
+#else
|
|
+static void SVIP_NAT_device_release(struct inode *inode,
|
|
+ struct file *file)
|
|
+#endif
|
|
+{
|
|
+ unsigned long flags;
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+ save_flags(flags);
|
|
+ cli();
|
|
+#else
|
|
+ local_save_flags(flags);
|
|
+#endif
|
|
+
|
|
+ /* The device can now be opened by the next caller */
|
|
+ nDeviceOpen--;
|
|
+
|
|
+ MOD_DEC_USE_COUNT;
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+ restore_flags(flags);
|
|
+#else
|
|
+ local_irq_restore(flags);
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
|
|
+ return 0;
|
|
+#endif
|
|
+}
|
|
+
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ This function is called when a process issues an ioctl on the SVIP NAT device file
|
|
+
|
|
+ \arguments
|
|
+ inode - pointer to disk file data
|
|
+ file - pointer to device file data
|
|
+ ioctl_num - ioctl number requested
|
|
+ ioctl_param - pointer to data related to the ioctl number
|
|
+
|
|
+ \return
|
|
+ 0 on success, else -1
|
|
+
|
|
+*/
|
|
+/******************************************************************************/
|
|
+long SVIP_NAT_device_ioctl (struct file *file,
|
|
+ unsigned int ioctl_num, unsigned long ioctl_param)
|
|
+{
|
|
+ int ret = 0;
|
|
+ SVIP_NAT_IO_Rule_t *pNatRule, *pNatRuleIn;
|
|
+ SVIP_UDP_PORT_t nPort;
|
|
+ int nNatIdx;
|
|
+ int bWrite = 0;
|
|
+ int bRead = 0;
|
|
+ unsigned char *pData = 0;
|
|
+ int nSize;
|
|
+
|
|
+ if (_IOC_DIR(ioctl_num) & _IOC_WRITE)
|
|
+ bWrite = 1;
|
|
+ if (_IOC_DIR(ioctl_num) & _IOC_READ)
|
|
+ bRead = 1;
|
|
+ nSize = _IOC_SIZE(ioctl_num);
|
|
+
|
|
+ if (nSize > sizeof(int))
|
|
+ {
|
|
+ if (bRead || bWrite)
|
|
+ {
|
|
+ pData = kmalloc (nSize, GFP_KERNEL);
|
|
+ if (bWrite)
|
|
+ {
|
|
+ if (copy_from_user ((void *)pData, (void *)ioctl_param, nSize) != 0)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: ioctl %x: copy_from_user() failed!\n", ioctl_num);
|
|
+ ret = -1;
|
|
+ goto error;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ switch (ioctl_num)
|
|
+ {
|
|
+ case FIO_SVIP_NAT_RULE_ADD:
|
|
+
|
|
+ pNatRuleIn = (SVIP_NAT_IO_Rule_t *)pData;
|
|
+
|
|
+ /* check if destination UDP port is within range */
|
|
+ nPort = ntohs(pNatRuleIn->locUDP);
|
|
+
|
|
+ if (!SVIP_PORT_INRANGE(nPort))
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: Error, UDP port(%d) is out of range(%d..%d)\n",
|
|
+ nPort, SVIP_UDP_FROM, SVIP_UDP_TO);
|
|
+ ret = -1;
|
|
+ goto error;
|
|
+ }
|
|
+ nNatIdx = SVIP_PORT_INDEX(nPort);
|
|
+
|
|
+ down(sem_nat_tbl_access);
|
|
+ pNatRule = &pNatTable[nNatIdx].natRule;
|
|
+
|
|
+ /* add rule to the NAT table */
|
|
+ pNatRule->remIP = pNatRuleIn->remIP;
|
|
+ memcpy((char *)pNatRule->remMAC, (char *)pNatRuleIn->remMAC, ETH_ALEN);
|
|
+ pNatRule->locIP = pNatRuleIn->locIP;
|
|
+ memcpy((char *)pNatRule->locMAC, (char *)pNatRuleIn->locMAC, ETH_ALEN);
|
|
+ pNatRule->locUDP = pNatRuleIn->locUDP;
|
|
+
|
|
+ memset(pNatTable[nNatIdx].natStats, 0,
|
|
+ sizeof(SVIP_NAT_stats_t)*SVIP_NAT_STATS_TYPES);
|
|
+ up(sem_nat_tbl_access);
|
|
+ break;
|
|
+
|
|
+ case FIO_SVIP_NAT_RULE_REMOVE:
|
|
+
|
|
+ pNatRuleIn = (SVIP_NAT_IO_Rule_t *)pData;
|
|
+
|
|
+ /* check if destination UDP port is within range */
|
|
+ nPort = ntohs(pNatRuleIn->locUDP);
|
|
+ if (!SVIP_PORT_INRANGE(nPort))
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: Error, UDP port(%d) is out of range(%d..%d)\n",
|
|
+ nPort, SVIP_UDP_FROM, SVIP_UDP_TO);
|
|
+ ret = -1;
|
|
+ goto error;
|
|
+ }
|
|
+ nNatIdx = SVIP_PORT_INDEX(nPort);
|
|
+ down(sem_nat_tbl_access);
|
|
+ /* remove rule from the NAT table */
|
|
+ memset(&pNatTable[nNatIdx], 0, sizeof(SVIP_NAT_table_entry_t));
|
|
+ up(sem_nat_tbl_access);
|
|
+ break;
|
|
+
|
|
+ case FIO_SVIP_NAT_RULE_LIST:
|
|
+ {
|
|
+ int len;
|
|
+ char buf[256];
|
|
+
|
|
+ down(sem_nat_tbl_access);
|
|
+ while (nProcReadIdx != -1)
|
|
+ {
|
|
+ len = SVIP_NAT_ProcReadNAT(buf, 256);
|
|
+ if (len > 0)
|
|
+ printk("%s", buf);
|
|
+ }
|
|
+ nProcReadIdx = 0;
|
|
+ up(sem_nat_tbl_access);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ default:
|
|
+ printk(KERN_ERR "SVIP NAT: unsupported ioctl (%x) command for device %s\n",
|
|
+ ioctl_num, PATH_SVIP_NAT_DEVICE_NAME);
|
|
+ ret = -1;
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ if (nSize > sizeof(int))
|
|
+ {
|
|
+ if (bRead)
|
|
+ {
|
|
+ if (copy_to_user ((void *)ioctl_param, (void *)pData, nSize) != 0)
|
|
+ {
|
|
+ printk(KERN_ERR "SVIP NAT: ioctl %x: copy_to_user() failed!\n", ioctl_num);
|
|
+ ret = -1;
|
|
+ goto error;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+error:
|
|
+ if (pData)
|
|
+ kfree(pData);
|
|
+
|
|
+ return ret;
|
|
+}
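From userspace the ioctl handler above would be driven roughly as sketched below. This is illustrative only: the header name "svip_nat_io.h" and the device path "/dev/svip_nat" are placeholders for whatever exports SVIP_NAT_IO_Rule_t, the FIO_SVIP_NAT_* request codes and PATH_SVIP_NAT_DEVICE_NAME.

/* Illustrative userspace sketch: install one NAT rule for a local VoFW
 * channel. Addresses, port and the header/device names are examples only. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include "svip_nat_io.h"		/* assumed header exporting the ioctl ABI */

int main(void)
{
	SVIP_NAT_IO_Rule_t rule;
	int fd = open("/dev/svip_nat", O_RDWR);	/* device path assumed */

	if (fd < 0) {
		perror("open SVIP NAT device");
		return 1;
	}

	memset(&rule, 0, sizeof(rule));
	rule.remIP  = htonl(0x0a000002);	/* remote host 10.0.0.2 */
	rule.locIP  = htonl(0xc0a87a02);	/* local VoFW host 192.168.122.2 */
	rule.locUDP = htons(50012);		/* must lie within the SVIP UDP range */
	/* remMAC/locMAC would be filled from ARP/neighbour information */

	if (ioctl(fd, FIO_SVIP_NAT_RULE_ADD, &rule) < 0)
		perror("FIO_SVIP_NAT_RULE_ADD");

	close(fd);
	return 0;
}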
|
|
+
|
|
+#if 0
|
|
+void dump_msg(unsigned char *pData, unsigned int nLen)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i=0; i<nLen; i++)
|
|
+ {
|
|
+ if (!i || !(i%16))
|
|
+ printk("\n ");
|
|
+ else if (i && !(i%4))
|
|
+ printk(" ");
|
|
+ printk("%02x", pData[i]);
|
|
+ }
|
|
+ if (--i%16)
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ Used to recalculate IP/UDP checksum using the original IP/UDP checksum
|
|
+ coming with the packet. The original source and destination IP addresses
|
|
+ are accounted for, and the checksum is updated using the new source and
|
|
+ destination IP addresses.
|
|
+
|
|
+ \arguments
|
|
+ skb - pointer to the receiving socket buffer
|
|
+ csum_old - original checksum
|
|
+ saddr_old - pointer to original source IP address
|
|
+ saddr_new - pointer to new source IP address
|
|
+ daddr_old - pointer to original destination IP address
|
|
+ daddr_new - pointer to new destination IP address
|
|
+
|
|
+ \return
|
|
+ recalculated IP/UDP checksum
|
|
+ */
|
|
+/******************************************************************************/
|
|
+static inline u16 ip_udp_quick_csum(u16 csum_old, u16 *saddr_old, u16 *saddr_new,
|
|
+ u16 *daddr_old, u16 *daddr_new)
|
|
+{
|
|
+ u32 sum;
|
|
+
|
|
+ sum = csum_old;
|
|
+
|
|
+ /* convert back from one's complement */
|
|
+ sum = ~sum & 0xffff;
|
|
+
|
|
+ if (sum < saddr_old[0]) sum += 0xffff;
|
|
+ sum -= saddr_old[0];
|
|
+ if (sum < saddr_old[1]) sum += 0xffff;
|
|
+ sum -= saddr_old[1];
|
|
+ if (sum < daddr_old[0]) sum += 0xffff;
|
|
+ sum -= daddr_old[0];
|
|
+ if (sum < daddr_old[1]) sum += 0xffff;
|
|
+ sum -= daddr_old[1];
|
|
+
|
|
+ sum += saddr_new[0];
|
|
+ sum += saddr_new[1];
|
|
+ sum += daddr_new[0];
|
|
+ sum += daddr_new[1];
|
|
+
|
|
+ /* take only 16 bits out of the 32 bit sum and add up the carries */
|
|
+ while (sum >> 16)
|
|
+ sum = (sum & 0xffff)+((sum >> 16) & 0xffff);
|
|
+
|
|
+ /* one's complement the result */
|
|
+ sum = ~sum;
|
|
+
|
|
+ return (u16)(sum & 0xffff);
|
|
+}
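As a cross-check of the incremental update, the standalone program below (userspace, illustrative only) mirrors the logic of ip_udp_quick_csum() and compares it against a full one's-complement recomputation for a sample IPv4 header whose destination address changes.

/* quick_csum() mirrors ip_udp_quick_csum() above; full_csum() is the plain
 * 16-bit one's-complement sum. The sample header has its checksum field
 * zeroed, source 192.168.1.1, destination changed 192.168.1.2 -> 10.0.0.1. */
#include <stdio.h>
#include <stdint.h>

static uint16_t full_csum(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t quick_csum(uint16_t csum_old, const uint16_t *old4,
			   const uint16_t *new4)
{
	uint32_t sum = (uint16_t)~csum_old;
	int i;

	for (i = 0; i < 4; i++) {	/* subtract old src/dst words */
		if (sum < old4[i])
			sum += 0xffff;
		sum -= old4[i];
	}
	for (i = 0; i < 4; i++)		/* add new src/dst words */
		sum += new4[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t hdr[10] = { 0x4500, 0x0028, 0x0000, 0x4000, 0x4011,
			     0x0000, 0xc0a8, 0x0101, 0xc0a8, 0x0102 };
	uint16_t old4[4] = { 0xc0a8, 0x0101, 0xc0a8, 0x0102 };
	uint16_t new4[4] = { 0xc0a8, 0x0101, 0x0a00, 0x0001 };
	uint16_t before  = full_csum(hdr, 10);

	hdr[8] = 0x0a00;		/* rewrite destination address */
	hdr[9] = 0x0001;
	printf("full : 0x%04x\n", full_csum(hdr, 10));
	printf("quick: 0x%04x\n", quick_csum(before, old4, new4));
	/* both lines print 0x6f1b for this example */
	return 0;
}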
|
|
+
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ Returns a pointer to an ipv4 address assigned to device dev. The ipv4
|
|
+ instance checked is pointed to by ifa_start. The function is suited for
|
|
+ iterative calls.
|
|
+
|
|
+ \arguments
|
|
+ dev - pointer to network interface
|
|
+ ifa_start - pointer to ipv4 instance to return ipv4 address assigned
|
|
+ to, NULL for the first one
|
|
+ ppifa_addr - output parameter
|
|
+
|
|
+ \return
|
|
+ pointer to the next ipv4 instance, which can be null if ifa_start was
|
|
+ the last instance present
|
|
+ */
|
|
+/******************************************************************************/
|
|
+static struct in_ifaddr *get_ifaddr(struct net_device *dev,
|
|
+ struct in_ifaddr *ifa_start, unsigned int **ppifa_addr)
|
|
+{
|
|
+ struct in_device *in_dev;
|
|
+ struct in_ifaddr *ifa = NULL;
|
|
+
|
|
+ if ((in_dev=in_dev_get(dev)) != NULL)
|
|
+ {
|
|
+ if (ifa_start == NULL)
|
|
+ ifa = in_dev->ifa_list;
|
|
+ else
|
|
+ ifa = ifa_start;
|
|
+ if (ifa)
|
|
+ {
|
|
+ *ppifa_addr = &ifa->ifa_address;
|
|
+ ifa = ifa->ifa_next;
|
|
+ }
|
|
+ in_dev_put(in_dev);
|
|
+ return ifa;
|
|
+ }
|
|
+ *ppifa_addr = NULL;
|
|
+ return NULL;
|
|
+}
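The iterative calling pattern described above looks roughly like the sketch below (illustrative only; it assumes the same includes as this file and mirrors the loop used later in do_SVIP_NAT()).

/* Illustrative helper: walk every IPv4 address assigned to a device by
 * repeated get_ifaddr() calls, starting from NULL. */
static void svip_nat_dump_addrs_example(struct net_device *dev)
{
	struct in_ifaddr *ifa = NULL;
	unsigned int *addr;

	do {
		ifa = get_ifaddr(dev, ifa, &addr);
		if (!addr)
			break;		/* no (further) address configured */
		printk(KERN_INFO "%s: %pI4\n", dev->name, addr);
	} while (ifa);
}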
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ This function performs IP NAT for received packets satisfying the
|
|
+ following requirements:
|
|
+
|
|
+ - packet is destined to local IP host
|
|
+ - transport protocol type is UDP
|
|
+ - destination UDP port is within range
|
|
+
|
|
+ \arguments
|
|
+ skb - pointer to the receiving socket buffer
|
|
+
|
|
+ \return
|
|
+ returns 1 on performed SVIP NAT, else returns 0
|
|
+
|
|
+ \remarks
|
|
+ When the function returns 0, it indicates that the caller should pass the
|
|
+ packet up the IP stack to make a further decision about it
|
|
+ */
|
|
+/******************************************************************************/
|
|
+int do_SVIP_NAT (struct sk_buff *skb)
|
|
+{
|
|
+ struct net_device *real_dev;
|
|
+ struct iphdr *iph;
|
|
+ struct udphdr *udph;
|
|
+ SVIP_NAT_IO_Rule_t *pNatRule;
|
|
+ int nNatIdx, in_eth0, nDir;
|
|
+#ifndef VLAN_8021Q_UNUSED
|
|
+ int vlan;
|
|
+ unsigned short vid;
|
|
+#endif /* ! VLAN_8021Q_UNUSED */
|
|
+ SVIP_UDP_PORT_t nPort;
|
|
+ u32 orgSrcIp, orgDstIp, *pSrcIp, *pDstIp;
|
|
+ struct ethhdr *ethh;
|
|
+
|
|
+ /* do not consider if SVIP NAT device not open. */
|
|
+ if (!nDeviceOpen)
|
|
+ {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* consider only UDP packets. */
|
|
+ iph = SVIP_NAT_IP_HDR(skb);
|
|
+ if (iph->protocol != IPPROTO_UDP)
|
|
+ {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
|
|
+ /* consider only packets whose UDP port numbers reside within
|
|
+ the predefined SVIP NAT UDP port range. */
|
|
+ if ((!SVIP_PORT_INRANGE(ntohs(udph->dest))) &&
|
|
+ (!SVIP_PORT_INRANGE(ntohs(udph->source))))
|
|
+ {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+#ifndef VLAN_8021Q_UNUSED
|
|
+ /* check if packet delivered over VLAN. VLAN packets will be routed over
|
|
+ the VLAN interfaces of the respective real Ethernet interface, if one
|
|
+ exists (VIDs must match). Else, the packet will be sent out as an IEEE 802.3
|
|
+ Ethernet frame */
|
|
+ if (skb->dev->priv_flags & IFF_802_1Q_VLAN)
|
|
+ {
|
|
+ vlan = 1;
|
|
+ vid = VLAN_DEV_VLAN_ID(skb->dev);
|
|
+ real_dev = VLAN_DEV_REAL_DEV(skb->dev);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ vlan = 0;
|
|
+ vid = 0;
|
|
+ real_dev = skb->dev;
|
|
+ }
|
|
+#endif /* ! VLAN_8021Q_UNUSED */
|
|
+
|
|
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
|
|
+ /** Debugging feature which can be enabled by writing,
|
|
+ 'echo 1 > /proc/net/svip_nat/snifferOnOff'.
|
|
+ It copies all packets received on veth0 and sends them out over eth0.
|
|
+ When a destination MAC address is specified through
|
|
+ /proc/net/svip_nat/snifferMAC, this MAC address will substitute the
|
|
+ original MAC address of the packet.
|
|
+ It is recommended to specify a MAC address of some host where Wireshark
|
|
+ runs and sniffs for this traffic, else you may flood your LAN with
|
|
+ undeliverable traffic.
|
|
+
|
|
+NOTE: In case of VLAN traffic the VLAN header information is lost. */
|
|
+ if (nSVIP_NAT_Sniffer)
|
|
+ {
|
|
+ if (real_dev == net_devs[SVIP_NET_DEV_VETH0_IDX])
|
|
+ {
|
|
+ struct sk_buff *copied_skb;
|
|
+
|
|
+ /* gain the Ethernet header from the skb */
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+
|
|
+ copied_skb = skb_copy (skb, GFP_ATOMIC);
|
|
+
|
|
+ if (nSVIP_NAT_SnifferMacSet == 1)
|
|
+ {
|
|
+ ethh = (struct ethhdr *)SVIP_NAT_SKB_MAC_HEADER(copied_skb);
|
|
+ memcpy((char *)ethh->h_dest, (char *)pSVIP_NAT_SnifferMAC, ETH_ALEN);
|
|
+ }
|
|
+ copied_skb->dev = net_devs[SVIP_NET_DEV_ETH0_IDX];
|
|
+ dev_queue_xmit(copied_skb);
|
|
+
|
|
+ /* skip the ETH header again */
|
|
+ skb_pull(skb, ETH_HLEN);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+
|
|
+ /* check if packet arrived on eth0 */
|
|
+ if (real_dev == net_devs[SVIP_NET_DEV_ETH0_IDX])
|
|
+ {
|
|
+ /* check if destination IP address equals the primary assigned IP address
|
|
+ of interface eth0. This is the case of packets originating from a
|
|
+ remote peer that are to be delivered to a channel residing on THIS
|
|
+ voice linecard system. This is typical SVIP NAT case, therefore this
|
|
+ rule is placed on top. */
|
|
+ if (iph->daddr == *paddr_eth0)
|
|
+ {
|
|
+ nPort = ntohs(udph->dest);
|
|
+ nDir = SVIP_NAT_STATS_REM2LOC;
|
|
+ }
|
|
+ /* check if destination IP address equals the secondary assigned IP address
|
|
+ of interface eth0. This is not a typical SVIP NAT case. It is basically
|
|
+ there, as someone might like, for debugging purposes, to use the LCC to route
|
|
+ Slave SVIP packets which are part of voice/fax streaming. */
|
|
+ else if (iph->daddr == *paddr_eth0_0)
|
|
+ {
|
|
+ nPort = ntohs(udph->source);
|
|
+ nDir = SVIP_NAT_STATS_LOC2REM;
|
|
+ }
|
|
+#ifndef VLAN_8021Q_UNUSED
|
|
+ /* when the packet did not hit the top two rules, here we check if the packet
|
|
+ has addressed any of the IP addresses assigned to the VLAN interface attached
|
|
+ to eth0. This is not a recommended approach because of the CPU cost incurred. */
|
|
+ else if (vlan)
|
|
+ {
|
|
+ unsigned int *pifa_addr;
|
|
+ struct in_ifaddr *ifa_start = NULL;
|
|
+ int i = 0;
|
|
+
|
|
+ do
|
|
+ {
|
|
+ ifa_start = get_ifaddr(skb->dev, ifa_start, &pifa_addr);
|
|
+ if (!pifa_addr)
|
|
+ {
|
|
+ /* VLAN packet received on vlan interface attached to eth0,
|
|
+ however no IP address assigned to the interface.
|
|
+ The packet is ignored. */
|
|
+ return 0;
|
|
+ }
|
|
+ if (iph->daddr == *pifa_addr)
|
|
+ {
|
|
+ /* packet destined to... */
|
|
+ break;
|
|
+ }
|
|
+ if (!ifa_start)
|
|
+ {
|
|
+ return 0;
|
|
+ }
|
|
+ i++;
|
|
+ } while (ifa_start);
|
|
+ if (!i)
|
|
+ {
|
|
+ /* ...primary assigned IP address to the VLAN interface. */
|
|
+ nPort = ntohs(udph->dest);
|
|
+ nDir = SVIP_NAT_STATS_REM2LOC;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* ...secondary assigned IP address to the VLAN interface. */
|
|
+ nPort = ntohs(udph->source);
|
|
+ nDir = SVIP_NAT_STATS_LOC2REM;
|
|
+ }
|
|
+ }
|
|
+#endif /* ! VLAN_8021Q_UNUSED */
|
|
+ else
|
|
+ {
|
|
+ return 0;
|
|
+ }
|
|
+ in_eth0 = 1;
|
|
+ }
|
|
+ /* check if packet arrived on veth0 */
|
|
+ else if (real_dev == net_devs[SVIP_NET_DEV_VETH0_IDX])
|
|
+ {
|
|
+ nPort = ntohs(udph->source);
|
|
+ nDir = SVIP_NAT_STATS_LOC2REM;
|
|
+ in_eth0 = 0;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* packet arrived neither on eth0, nor veth0 */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* calculate the respective index of the NAT table */
|
|
+ nNatIdx = SVIP_PORT_INDEX(nPort);
|
|
+ /* process the packet if a respective NAT rule exists */
|
|
+ pNatRule = &pNatTable[nNatIdx].natRule;
|
|
+
|
|
+ ethh = (struct ethhdr *)SVIP_NAT_SKB_MAC_HEADER(skb);
|
|
+
|
|
+ /* copy packet's original source and destination IP addresses to use
|
|
+ later on to perform efficient checksum recalculation */
|
|
+ orgSrcIp = iph->saddr;
|
|
+ orgDstIp = iph->daddr;
|
|
+
|
|
+ if (in_eth0)
|
|
+ {
|
|
+ u8 *pDstMac;
|
|
+
|
|
+ /* Process packet arrived on eth0 */
|
|
+
|
|
+ if (nDir == SVIP_NAT_STATS_REM2LOC && iph->saddr == pNatRule->remIP)
|
|
+ {
|
|
+ pDstIp = &pNatRule->locIP;
|
|
+ pDstMac = pNatRule->locMAC;
|
|
+ }
|
|
+ else if (nDir == SVIP_NAT_STATS_LOC2REM && iph->saddr == pNatRule->locIP)
|
|
+ {
|
|
+ pDstIp = &pNatRule->remIP;
|
|
+ pDstMac = pNatRule->remMAC;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Rule check failed. The packet is passed up the layers,
|
|
+ it will be dropped by UDP */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if ((*pDstIp & *pmask_veth0) == (*paddr_veth0 & *pmask_veth0))
|
|
+ {
|
|
+#ifndef VLAN_8021Q_UNUSED
|
|
+ if (vlan)
|
|
+ {
|
|
+ struct net_device *vlan_dev;
|
|
+
|
|
+ spin_lock_bh(&vlan_group_lock);
|
|
+ vlan_dev = __vlan_find_dev_deep(net_devs[SVIP_NET_DEV_VETH0_IDX], vid);
|
|
+ spin_unlock_bh(&vlan_group_lock);
|
|
+ if (vlan_dev)
|
|
+ {
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+ struct vlan_ethhdr *vethh;
|
|
+
|
|
+ skb_push(skb, VLAN_ETH_HLEN);
|
|
+ /* reconstruct the VLAN header.
|
|
+NOTE: priority information is lost */
|
|
+ vethh = (struct vlan_ethhdr *)skb->data;
|
|
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
|
|
+ vethh->h_vlan_TCI = htons(vid);
|
|
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
|
|
+ ethh = (struct ethhdr *)vethh;
|
|
+#else
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+#endif
|
|
+ skb->dev = vlan_dev;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ skb->dev = net_devs[SVIP_NET_DEV_VETH0_IDX];
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+#endif /* ! VLAN_8021Q_UNUSED */
|
|
+ {
|
|
+ skb->dev = net_devs[SVIP_NET_DEV_VETH0_IDX];
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+ }
|
|
+ pSrcIp = paddr_veth0;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+#ifndef VLAN_8021Q_UNUSED
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+ if (vlan)
|
|
+ {
|
|
+ struct vlan_ethhdr *vethh;
|
|
+
|
|
+ /* reconstruct the VLAN header.
|
|
+NOTE: priority information is lost */
|
|
+ skb_push(skb, VLAN_ETH_HLEN);
|
|
+ vethh = (struct vlan_ethhdr *)skb->data;
|
|
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
|
|
+ vethh->h_vlan_TCI = htons(vid);
|
|
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
|
|
+ ethh = (struct ethhdr *)vethh;
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+#endif /* ! VLAN_8021Q_UNUSED */
|
|
+ {
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+ }
|
|
+ /* source IP address equals the destination IP address
|
|
+ of the incoming packet */
|
|
+ pSrcIp = &iph->daddr;
|
|
+ }
|
|
+ iph->saddr = *pSrcIp;
|
|
+ memcpy((char *)ethh->h_source, (char *)skb->dev->dev_addr, ETH_ALEN);
|
|
+ iph->daddr = *pDstIp;
|
|
+ memcpy((char *)ethh->h_dest, (char *)pDstMac, ETH_ALEN);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Process packet arrived on veth0 */
|
|
+
|
|
+ if (iph->saddr != pNatRule->locIP)
|
|
+ {
|
|
+ /* Rule check failed. The packet is passed up the layers,
|
|
+ it will be dropped by UDP */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (!((pNatRule->remIP & *pmask_veth0) == (*paddr_veth0 & *pmask_veth0)))
|
|
+ {
|
|
+#ifndef VLAN_8021Q_UNUSED
|
|
+ if (vlan)
|
|
+ {
|
|
+ struct net_device *vlan_dev;
|
|
+
|
|
+ spin_lock_bh(&vlan_group_lock);
|
|
+ vlan_dev = __vlan_find_dev_deep(net_devs[SVIP_NET_DEV_ETH0_IDX], vid);
|
|
+ spin_unlock_bh(&vlan_group_lock);
|
|
+ if (vlan_dev)
|
|
+ {
|
|
+ unsigned int *pifa_addr;
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+ struct vlan_ethhdr *vethh;
|
|
+
|
|
+ skb_push(skb, VLAN_ETH_HLEN);
|
|
+ /* construct the VLAN header, note priority information is lost */
|
|
+ vethh = (struct vlan_ethhdr *)skb->data;
|
|
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
|
|
+ vethh->h_vlan_TCI = htons(vid);
|
|
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
|
|
+ ethh = (struct ethhdr *)vethh;
|
|
+#else
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+#endif
|
|
+ skb->dev = vlan_dev;
|
|
+
|
|
+ get_ifaddr(skb->dev, NULL, &pifa_addr);
|
|
+ if (pifa_addr)
|
|
+ {
|
|
+ pSrcIp = pifa_addr;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ pSrcIp = paddr_eth0;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ skb->dev = net_devs[SVIP_NET_DEV_ETH0_IDX];
|
|
+ pSrcIp = paddr_eth0;
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+#endif /* ! VLAN_8021Q_UNUSED */
|
|
+ {
|
|
+ skb->dev = net_devs[SVIP_NET_DEV_ETH0_IDX];
|
|
+ pSrcIp = paddr_eth0;
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ pSrcIp = paddr_veth0;
|
|
+#ifndef VLAN_8021Q_UNUSED
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
|
|
+ if (vlan)
|
|
+ {
|
|
+ struct vlan_ethhdr *vethh;
|
|
+
|
|
+ skb_push(skb, VLAN_ETH_HLEN);
|
|
+ /* reconstruct the VLAN header.
|
|
+NOTE: priority information is lost. */
|
|
+ vethh = (struct vlan_ethhdr *)skb->data;
|
|
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
|
|
+ vethh->h_vlan_TCI = htons(vid);
|
|
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
|
|
+ ethh = (struct ethhdr *)vethh;
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+#endif /* ! VLAN_8021Q_UNUSED */
|
|
+ {
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+ }
|
|
+ }
|
|
+ iph->saddr = *pSrcIp;
|
|
+ memcpy((char *)ethh->h_source, (char *)skb->dev->dev_addr, ETH_ALEN);
|
|
+ iph->daddr = pNatRule->remIP;
|
|
+ memcpy((char *)ethh->h_dest, (char *)pNatRule->remMAC, ETH_ALEN);
|
|
+ }
|
|
+ pNatTable[nNatIdx].natStats[nDir].inPackets++;
|
|
+
|
|
+ iph->check = ip_udp_quick_csum(iph->check, (u16 *)&orgSrcIp, (u16 *)&iph->saddr,
|
|
+ (u16 *)&orgDstIp, (u16 *)&iph->daddr);
|
|
+ if (udph->check != 0)
|
|
+ {
|
|
+ udph->check = ip_udp_quick_csum(udph->check, (u16 *)&orgSrcIp, (u16 *)&iph->saddr,
|
|
+ (u16 *)&orgDstIp, (u16 *)&iph->daddr);
|
|
+ }
|
|
+
|
|
+ /* write the packet out, directly to the network device */
|
|
+ if (dev_queue_xmit(skb) < 0)
|
|
+ pNatTable[nNatIdx].natStats[nDir].outErrors++;
|
|
+ else
|
|
+ pNatTable[nNatIdx].natStats[nDir].outPackets++;
|
|
+
|
|
+ return 1;
|
|
+}
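The caller of do_SVIP_NAT() is not visible in this hunk; conceptually the Ethernet receive path is expected to offer each frame to it first and fall back to the normal stack when it returns 0, along the lines of this hypothetical sketch (function name and hook point are assumptions, not taken from the patch):

/* Hypothetical receive-path hook, for illustration only. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

extern int do_SVIP_NAT(struct sk_buff *skb);

static void svip_rx_hook_example(struct sk_buff *skb)
{
	if (do_SVIP_NAT(skb))
		return;		/* consumed: rewritten and re-queued via dev_queue_xmit() */

	netif_rx(skb);		/* not an SVIP NAT packet: normal IP input path */
}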
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ Function executed upon loading of the SVIP NAT module. It registers the
|
|
+ SVIP NAT configuration device and allocates the memory used for the NAT table.
|
|
+
|
|
+ \remarks:
|
|
+ Currently the SVIP NAT module is statically linked into the Linux kernel,
|
|
+ therefore this routine runs at kernel initialisation time.
|
|
+ *******************************************************************************/
|
|
+static int __init init(void)
|
|
+{
|
|
+ int ret = 0;
|
|
+ struct net_device *dev;
|
|
+
|
|
+ if (misc_register(&SVIP_NAT_miscdev) != 0)
|
|
+ {
|
|
+ printk(KERN_ERR "%s: cannot register SVIP NAT device node.\n",
|
|
+ SVIP_NAT_miscdev.name);
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ /* allocation of memory for NAT table */
|
|
+ pNatTable = (SVIP_NAT_table_entry_t *)kmalloc(
|
|
+ sizeof(SVIP_NAT_table_entry_t) * SVIP_SYS_CODEC_NUM, GFP_ATOMIC);
|
|
+ if (pNatTable == NULL)
|
|
+ {
|
|
+ printk (KERN_ERR "SVIP NAT: Error(%d), allocating memory for NAT table\n", ret);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ /* clear the NAT table */
|
|
+ memset((void *)pNatTable, 0, sizeof(SVIP_NAT_table_entry_t) * SVIP_SYS_CODEC_NUM);
|
|
+
|
|
+ if ((sem_nat_tbl_access = kmalloc(sizeof(struct semaphore), GFP_KERNEL)))
|
|
+ {
|
|
+ sema_init(sem_nat_tbl_access, 1);
|
|
+ }
|
|
+
|
|
+ SVIP_NAT_ProcInstall();
|
|
+
|
|
+ /* find pointers to 'struct net_device' of eth0 and veth0, respectively */
|
|
+ read_lock(&dev_base_lock);
|
|
+ SVIP_NAT_FOR_EACH_NETDEV(dev)
|
|
+ {
|
|
+ if (!strcmp(dev->name, SVIP_NET_DEV_ETH0_NAME))
|
|
+ {
|
|
+ net_devs[SVIP_NET_DEV_ETH0_IDX] = dev;
|
|
+ }
|
|
+ if (!strcmp(dev->name, SVIP_NET_DEV_VETH1_NAME))
|
|
+ {
|
|
+ net_devs[SVIP_NET_DEV_VETH0_IDX] = dev;
|
|
+ }
|
|
+ else if (!strcmp(dev->name, SVIP_NET_DEV_ETH1_NAME))
|
|
+ {
|
|
+ net_devs[SVIP_NET_DEV_VETH0_IDX] = dev;
|
|
+ }
|
|
+ }
|
|
+ read_unlock(&dev_base_lock);
|
|
+
|
|
+ if (net_devs[SVIP_NET_DEV_ETH0_IDX] == NULL ||
|
|
+ net_devs[SVIP_NET_DEV_VETH0_IDX] == NULL)
|
|
+ {
|
|
+ printk (KERN_ERR "SVIP NAT: Error, unable to locate eth0 and veth0 interfaces\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ printk ("%s, (c) 2009, Lantiq Deutschland GmbH\n", &SVIP_NAT_INFO_STR[4]);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/******************************************************************************/
|
|
+/**
|
|
+ Function executed upon unloading of the SVIP NAT module. It unregisters the
|
|
+ SVIP NAT configuration device and frees the memory used for the NAT table.
|
|
+
|
|
+ \remarks:
|
|
+ Currently the SVIP NAT module is statically linked into the Linux kernel
|
|
+ therefore this routine cannot be executed.
|
|
+ *******************************************************************************/
|
|
+static void __exit fini(void)
|
|
+{
|
|
+ MOD_DEC_USE_COUNT;
|
|
+
|
|
+ /* unregister SVIP NAT configuration device */
|
|
+ misc_deregister(&SVIP_NAT_miscdev);
|
|
+
|
|
+ /* release memory of SVIP NAT table */
|
|
+ if (pNatTable != NULL)
|
|
+ {
|
|
+ kfree (pNatTable);
|
|
+ }
|
|
+}
|
|
+
|
|
+module_init(init);
|
|
+module_exit(fini);
|
|
Index: linux-3.3.8/drivers/spi/Kconfig
|
|
===================================================================
|
|
--- linux-3.3.8.orig/drivers/spi/Kconfig 2012-07-31 19:51:33.077105873 +0200
|
|
+++ linux-3.3.8/drivers/spi/Kconfig 2012-07-31 19:51:34.153105919 +0200
|
|
@@ -366,6 +366,11 @@
|
|
This driver also supports the ML7213/ML7223/ML7831, a companion chip
|
|
for the Atom E6xx series and compatible with the Intel EG20T PCH.
|
|
|
|
+config SPI_SVIP
|
|
+ tristate "SVIP SPI controller"
|
|
+ depends on SOC_SVIP
|
|
+ default y
|
|
+
|
|
config SPI_TXX9
|
|
tristate "Toshiba TXx9 SPI controller"
|
|
depends on GENERIC_GPIO && CPU_TX49XX
|
|
Index: linux-3.3.8/drivers/spi/Makefile
|
|
===================================================================
|
|
--- linux-3.3.8.orig/drivers/spi/Makefile 2012-07-31 19:51:33.077105873 +0200
|
|
+++ linux-3.3.8/drivers/spi/Makefile 2012-07-31 19:51:34.153105919 +0200
|
|
@@ -61,4 +61,5 @@
|
|
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
|
|
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
|
|
obj-$(CONFIG_SPI_XWAY) += spi-xway.o
|
|
+obj-$(CONFIG_SPI_SVIP) += spi_svip.o
|
|
|
|
Index: linux-3.3.8/net/ipv4/Kconfig
|
|
===================================================================
|
|
--- linux-3.3.8.orig/net/ipv4/Kconfig 2012-06-01 09:16:13.000000000 +0200
|
|
+++ linux-3.3.8/net/ipv4/Kconfig 2012-07-31 19:51:34.153105919 +0200
|
|
@@ -630,3 +630,10 @@
|
|
on the Internet.
|
|
|
|
If unsure, say N.
|
|
+
|
|
+config SVIP_NAT
|
|
+ bool "Include SVIP NAT"
|
|
+ depends on SOC_SVIP
|
|
+ default y
|
|
+ ---help---
|
|
+ Include the SVIP NAT.
|
|
Index: linux-3.3.8/net/ipv4/Makefile
|
|
===================================================================
|
|
--- linux-3.3.8.orig/net/ipv4/Makefile 2012-07-31 19:51:33.401105887 +0200
|
|
+++ linux-3.3.8/net/ipv4/Makefile 2012-07-31 19:51:34.153105919 +0200
|
|
@@ -56,3 +56,4 @@
|
|
|
|
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
|
|
xfrm4_output.o
|
|
+obj-$(CONFIG_SVIP_NAT) += svip_nat.o
|
|
Index: linux-3.3.8/arch/mips/lantiq/svip/switchip_setup.c
|
|
===================================================================
|
|
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
|
|
+++ linux-3.3.8/arch/mips/lantiq/svip/switchip_setup.c 2012-07-31 19:51:34.153105919 +0200
|
|
@@ -0,0 +1,666 @@
|
|
+/******************************************************************************
|
|
+ Copyright (c) 2007, Infineon Technologies. All rights reserved.
|
|
+
|
|
+ No Warranty
|
|
+ Because the program is licensed free of charge, there is no warranty for
|
|
+ the program, to the extent permitted by applicable law. Except when
|
|
+ otherwise stated in writing the copyright holders and/or other parties
|
|
+ provide the program "as is" without warranty of any kind, either
|
|
+ expressed or implied, including, but not limited to, the implied
|
|
+ warranties of merchantability and fitness for a particular purpose. The
|
|
+ entire risk as to the quality and performance of the program is with
|
|
+ you. should the program prove defective, you assume the cost of all
|
|
+ necessary servicing, repair or correction.
|
|
+
|
|
+ In no event unless required by applicable law or agreed to in writing
|
|
+ will any copyright holder, or any other party who may modify and/or
|
|
+ redistribute the program as permitted above, be liable to you for
|
|
+ damages, including any general, special, incidental or consequential
|
|
+ damages arising out of the use or inability to use the program
|
|
+ (including but not limited to loss of data or data being rendered
|
|
+ inaccurate or losses sustained by you or third parties or a failure of
|
|
+ the program to operate with any other programs), even if such holder or
|
|
+ other party has been advised of the possibility of such damages.
|
|
+ ******************************************************************************
|
|
+ Module : switchip_setup.c
|
|
+ Date : 2007-11-09
|
|
+ Description : Basic setup of embedded ethernet switch "SwitchIP"
|
|
+ Remarks: andreas.schmidt@infineon.com
|
|
+
|
|
+ *****************************************************************************/
|
|
+
|
|
+/* TODO: get rid of #ifdef CONFIG_LANTIQ_MACH_EASY336 */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/workqueue.h>
|
|
+#include <linux/time.h>
|
|
+
|
|
+#include <base_reg.h>
|
|
+#include <es_reg.h>
|
|
+#include <sys1_reg.h>
|
|
+#include <dma_reg.h>
|
|
+#include <lantiq_soc.h>
|
|
+
|
|
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
|
|
+static struct svip_reg_es *const es = (struct svip_reg_es *)LTQ_ES_BASE;
|
|
+
|
|
+/* PHY Organizationally Unique Identifier (OUI) */
|
|
+#define PHY_OUI_PMC 0x00E004
|
|
+#define PHY_OUI_VITESSE 0x008083
|
|
+#define PHY_OUI_DEFAULT 0xFFFFFF
|
|
+
|
|
+unsigned short switchip_phy_read(unsigned int phyaddr, unsigned int regaddr);
|
|
+void switchip_phy_write(unsigned int phyaddr, unsigned int regaddr,
|
|
+ unsigned short data);
|
|
+
|
|
+static int phy_address[2] = {0, 1};
|
|
+static u32 phy_oui;
|
|
+static void switchip_mdio_poll_init(void);
|
|
+static void _switchip_mdio_poll(struct work_struct *work);
|
|
+
|
|
+/* struct workqueue_struct mdio_poll_task; */
|
|
+static struct workqueue_struct *mdio_poll_workqueue;
|
|
+DECLARE_DELAYED_WORK(mdio_poll_work, _switchip_mdio_poll);
|
|
+static int old_link_status[2] = {-1, -1};
|
|
+
|
|
+/**
|
|
+ * Autonegotiation check.
|
|
+ * This function checks for link changes. If a link change has occurred it will
|
|
+ * update certain switch registers.
|
|
+ */
|
|
+static void _switchip_check_phy_status(int port)
|
|
+{
|
|
+ int new_link_status;
|
|
+ unsigned short reg1;
|
|
+
|
|
+ reg1 = switchip_phy_read(phy_address[port], 1);
|
|
+ if ((reg1 == 0xFFFF) || (reg1 == 0x0000))
|
|
+ return; /* no PHY connected */
|
|
+
|
|
+ new_link_status = reg1 & 4;
|
|
+ if (old_link_status[port] ^ new_link_status) {
|
|
+ /* link status change */
|
|
+ if (!new_link_status) {
|
|
+ if (port == 0)
|
|
+ es_w32_mask(LTQ_ES_P0_CTL_REG_FLP, 0, p0_ctl);
|
|
+ else
|
|
+ es_w32_mask(LTQ_ES_P0_CTL_REG_FLP, 0, p1_ctl);
|
|
+
|
|
+ /* read again; link bit is latched low! */
|
|
+ reg1 = switchip_phy_read(phy_address[port], 1);
|
|
+ new_link_status = reg1 & 4;
|
|
+ }
|
|
+
|
|
+ if (new_link_status) {
|
|
+ unsigned short reg0, reg4, reg5, reg9, reg10;
|
|
+ int phy_pause, phy_speed, phy_duplex;
|
|
+ int aneg_enable, aneg_cmpt;
|
|
+
|
|
+ reg0 = switchip_phy_read(phy_address[port], 0);
|
|
+ reg4 = switchip_phy_read(phy_address[port], 4);
|
|
+ aneg_enable = reg0 & 0x1000;
|
|
+ aneg_cmpt = reg1 & 0x20;
|
|
+
|
|
+ if (aneg_enable && aneg_cmpt) {
|
|
+ reg5 = switchip_phy_read(phy_address[port], 5);
|
|
+ switch (phy_oui) {
|
|
+#ifdef CONFIG_LANTIQ_MACH_EASY336
|
|
+ case PHY_OUI_PMC:
|
|
+ /* PMC Sierra supports 1Gigabit FD,
|
|
+ * only. On successful
|
|
+ * auto-negotiation, we are sure this
|
|
+ * is what the link partner supports. */
|
|
+ phy_pause = ((reg4 & reg5) & 0x0080) >> 7;
|
|
+ phy_speed = 2;
|
|
+ phy_duplex = 1;
|
|
+ break;
|
|
+#endif
|
|
+ case PHY_OUI_VITESSE:
|
|
+ case PHY_OUI_DEFAULT:
|
|
+ reg9 = switchip_phy_read(phy_address[port], 9);
|
|
+ reg10 = switchip_phy_read(phy_address[port], 10);
|
|
+
|
|
+ /* Check if advertise and partner
|
|
+ * agree on pause */
|
|
+ phy_pause = ((reg4 & reg5) & 0x0400) >> 10;
|
|
+
|
|
+ /* Find the best mode both partners
|
|
+ * support
|
|
+ * Priority: 1GB-FD, 1GB-HD, 100MB-FD,
|
|
+ * 100MB-HD, 10MB-FD, 10MB-HD */
|
|
+ phy_speed = ((((reg9<<2) & reg10)
|
|
+ & 0x0c00) >> 6) |
|
|
+ (((reg4 & reg5) & 0x01e0) >> 5);
|
|
+
|
|
+ if (phy_speed >= 0x0020) {
|
|
+ phy_speed = 2;
|
|
+ phy_duplex = 1;
|
|
+ } else if (phy_speed >= 0x0010) {
|
|
+ phy_speed = 2;
|
|
+ phy_duplex = 0;
|
|
+ } else if (phy_speed >= 0x0008) {
|
|
+ phy_speed = 1;
|
|
+ phy_duplex = 1;
|
|
+ } else if (phy_speed >= 0x0004) {
|
|
+ phy_speed = 1;
|
|
+ phy_duplex = 0;
|
|
+ } else if (phy_speed >= 0x0002) {
|
|
+ phy_speed = 0;
|
|
+ phy_duplex = 1;
|
|
+ } else {
|
|
+ phy_speed = 0;
|
|
+ phy_duplex = 0;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ phy_pause = (reg4 & 0x0400) >> 10;
|
|
+ phy_speed = (reg0 & 0x40 ? 2 : (reg0 >> 13)&1);
|
|
+ phy_duplex = (reg0 >> 8)&1;
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ /* parallel detection or fixed speed */
|
|
+ phy_pause = (reg4 & 0x0400) >> 10;
|
|
+ phy_speed = (reg0 & 0x40 ? 2 : (reg0 >> 13)&1);
|
|
+ phy_duplex = (reg0 >> 8)&1;
|
|
+ }
|
|
+
|
|
+ if (port == 0) {
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0SPD,
|
|
+ LTQ_ES_RGMII_CTL_REG_P0SPD_VAL(phy_speed),
|
|
+ rgmii_ctl);
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0DUP,
|
|
+ LTQ_ES_RGMII_CTL_REG_P0DUP_VAL(phy_duplex),
|
|
+ rgmii_ctl);
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0FCE,
|
|
+ LTQ_ES_RGMII_CTL_REG_P0FCE_VAL(phy_pause),
|
|
+ rgmii_ctl);
|
|
+
|
|
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_FLP, p0_ctl);
|
|
+ } else {
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1SPD,
|
|
+ LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(phy_speed),
|
|
+ rgmii_ctl);
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1DUP,
|
|
+ LTQ_ES_RGMII_CTL_REG_P1DUP_VAL(phy_duplex),
|
|
+ rgmii_ctl);
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1FCE,
|
|
+ LTQ_ES_RGMII_CTL_REG_P0FCE_VAL(phy_pause),
|
|
+ rgmii_ctl);
|
|
+
|
|
+ es_w32_mask(1, LTQ_ES_P0_CTL_REG_FLP, p1_ctl);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ old_link_status[port] = new_link_status;
|
|
+}
|
|
+
|
|
+static void _switchip_mdio_poll(struct work_struct *work)
|
|
+{
|
|
+ if (es_r32(sw_gctl0) & LTQ_ES_SW_GCTL0_REG_SE) {
|
|
+ _switchip_check_phy_status(0);
|
|
+ _switchip_check_phy_status(1);
|
|
+ }
|
|
+
|
|
+ queue_delayed_work(mdio_poll_workqueue, &mdio_poll_work, HZ/2);
|
|
+}
|
|
+
|
|
+static void switchip_mdio_poll_init(void)
|
|
+{
|
|
+ mdio_poll_workqueue = create_workqueue("SVIP MDIO poll");
|
|
+ INIT_DELAYED_WORK(&mdio_poll_work, _switchip_mdio_poll);
|
|
+
|
|
+ queue_delayed_work(mdio_poll_workqueue, &mdio_poll_work, HZ/2);
|
|
+
|
|
+}
|
|
+
|
|
+unsigned short switchip_phy_read(unsigned int phyaddr, unsigned int regaddr)
|
|
+{
|
|
+ /* TODO: protect MDIO access with semaphore */
|
|
+ es_w32(LTQ_ES_MDIO_CTL_REG_MBUSY
|
|
+ | LTQ_ES_MDIO_CTL_REG_OP_VAL(2) /* read operation */
|
|
+ | LTQ_ES_MDIO_CTL_REG_PHYAD_VAL(phyaddr)
|
|
+ | LTQ_ES_MDIO_CTL_REG_REGAD_VAL(regaddr), mdio_ctl);
|
|
+ while (es_r32(mdio_ctl) & LTQ_ES_MDIO_CTL_REG_MBUSY);
|
|
+
|
|
+ return es_r32(mdio_data) & 0xFFFF;
|
|
+}
|
|
+EXPORT_SYMBOL(switchip_phy_read);
|
|
+
|
|
+void switchip_phy_write(unsigned int phyaddr, unsigned int regaddr,
|
|
+ unsigned short data)
|
|
+{
|
|
+ /* TODO: protect MDIO access with semaphore */
|
|
+ es_w32(LTQ_ES_MDIO_CTL_REG_WD_VAL(data)
|
|
+ | LTQ_ES_MDIO_CTL_REG_MBUSY
|
|
+ | LTQ_ES_MDIO_CTL_REG_OP_VAL(1) /* write operation */
|
|
+ | LTQ_ES_MDIO_CTL_REG_PHYAD_VAL(phyaddr)
|
|
+ | LTQ_ES_MDIO_CTL_REG_REGAD_VAL(regaddr), mdio_ctl);
|
|
+ while (es_r32(mdio_ctl) & LTQ_ES_MDIO_CTL_REG_MBUSY);
|
|
+
|
|
+ return;
|
|
+}
|
|
+EXPORT_SYMBOL(switchip_phy_write);
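Other kernel code can build on these exported MDIO accessors. The sketch below is illustrative only: it reads the IEEE 802.3 clause-22 status register and restarts auto-negotiation with the same BMCR bits (0x1000 enable, 0x0200 restart) used elsewhere in this file; the PHY address is supplied by the caller.

/* Illustrative use of the exported accessors. */
#include <linux/kernel.h>

extern unsigned short switchip_phy_read(unsigned int phyaddr, unsigned int regaddr);
extern void switchip_phy_write(unsigned int phyaddr, unsigned int regaddr,
			       unsigned short data);

static void switchip_restart_aneg_example(unsigned int phyaddr)
{
	unsigned short bmcr = switchip_phy_read(phyaddr, 0);	/* BMCR */
	unsigned short bmsr = switchip_phy_read(phyaddr, 1);	/* BMSR: link, aneg */

	if (bmsr == 0xffff || bmsr == 0x0000)
		return;						/* no PHY at this address */

	switchip_phy_write(phyaddr, 0, bmcr | 0x1000 | 0x0200);
}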
|
|
+
|
|
+const static u32 switch_reset_offset_000[] = {
|
|
+ /*b8000000:*/ 0xffffffff, 0x00000001, 0x00000001, 0x00000003,
|
|
+ /*b8000010:*/ 0x04070001, 0x04070001, 0x04070001, 0xffffffff,
|
|
+ /*b8000020:*/ 0x00001be8, 0x00001be8, 0x00001be8, 0xffffffff,
|
|
+ /*b8000030:*/ 0x00000000, 0x00000000, 0x00080004, 0x00020001,
|
|
+ /*b8000040:*/ 0x00000000, 0x00000000, 0x00080004, 0x00020001,
|
|
+ /*b8000050:*/ 0x00000000, 0x00000000, 0x00080004, 0x00020001,
|
|
+ /*b8000060:*/ 0x00000000, 0x00000000, 0x00081000, 0x001f7777,
|
|
+ /*b8000070:*/ 0x00000000, 0x00000000, 0x0c00ac2b, 0x0000fa50,
|
|
+ /*b8000080:*/ 0x00001000, 0x00001800, 0x00000000, 0x00000000,
|
|
+ /*b8000090:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b80000a0:*/ 0x00000000, 0x00000050, 0x00000010, 0x00000000,
|
|
+ /*b80000b0:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b80000c0:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b80000d0:*/ 0xffffffff, 0x00000000, 0x00000000
|
|
+};
|
|
+const static u32 switch_reset_offset_100[] = {
|
|
+ /*b8000100:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000110:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000120:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000130:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000140:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000150:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000160:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000170:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000180:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b8000190:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b80001a0:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
|
+ /*b80001b0:*/ 0x00000000, 0x00000000
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Switch Reset.
|
|
+ */
|
|
+void switchip_reset(void)
|
|
+{
|
|
+ volatile unsigned int *reg;
|
|
+ volatile unsigned int rdreg;
|
|
+ int i;
|
|
+
|
|
+ sys1_w32(SYS1_CLKENR_ETHSW, clkenr);
|
|
+ asm("sync");
|
|
+
|
|
+ /* disable P0 */
|
|
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_SPS_VAL(1), p0_ctl);
|
|
+ /* disable P1 */
|
|
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_SPS_VAL(1), p1_ctl);
|
|
+ /* disable P2 */
|
|
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_SPS_VAL(1), p2_ctl);
|
|
+
|
|
+ /**************************************
|
|
+ * BEGIN: Procedure to clear MAC table
|
|
+ **************************************/
|
|
+ for (i = 0; i < 3; i++) {
|
|
+ int result;
|
|
+
|
|
+ /* check if access engine is available */
|
|
+ while (es_r32(adr_tb_st2) & LTQ_ES_ADR_TB_ST2_REG_BUSY);
|
|
+
|
|
+ /* initialise to first address */
|
|
+ es_w32(LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(3)
|
|
+ | LTQ_ES_ADR_TB_CTL2_REG_AC_VAL(0), adr_tb_ctl2);
|
|
+
|
|
+ /* wait while busy */
|
|
+ while (es_r32(adr_tb_st2) & LTQ_ES_ADR_TB_ST2_REG_BUSY);
|
|
+
|
|
+ /* setup the portmap */
|
|
+ es_w32_mask(0, LTQ_ES_ADR_TB_CTL1_REG_PMAP_VAL(1 << i),
|
|
+ adr_tb_ctl1);
|
|
+
|
|
+ do {
|
|
+ /* search for addresses by port */
|
|
+ es_w32(LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(2)
|
|
+ | LTQ_ES_ADR_TB_CTL2_REG_AC_VAL(9), adr_tb_ctl2);
|
|
+
|
|
+ /* wait while busy */
|
|
+ while (es_r32(adr_tb_st2) & LTQ_ES_ADR_TB_ST2_REG_BUSY);
|
|
+
|
|
+ result = LTQ_ES_ADR_TB_ST2_REG_RSLT_GET(es_r32(adr_tb_st2));
|
|
+ if (result == 0x101) {
|
|
+ printk(KERN_ERR "%s, cmd error\n", __func__);
|
|
+ return;
|
|
+ }
|
|
+ /* if Command OK, address found... */
|
|
+ if (result == 0) {
|
|
+ unsigned char mac[6];
|
|
+
|
|
+ mac[5] = (es_r32(adr_tb_st0) >> 0) & 0xff;
|
|
+ mac[4] = (es_r32(adr_tb_st0) >> 8) & 0xff;
|
|
+ mac[3] = (es_r32(adr_tb_st0) >> 16) & 0xff;
|
|
+ mac[2] = (es_r32(adr_tb_st0) >> 24) & 0xff;
|
|
+ mac[1] = (es_r32(adr_tb_st1) >> 0) & 0xff;
|
|
+ mac[0] = (es_r32(adr_tb_st1) >> 8) & 0xff;
|
|
+
|
|
+ /* setup address */
|
|
+ es_w32((mac[5] << 0) |
|
|
+ (mac[4] << 8) |
|
|
+ (mac[3] << 16) |
|
|
+ (mac[2] << 24), adr_tb_ctl0);
|
|
+ es_w32(LTQ_ES_ADR_TB_CTL1_REG_PMAP_VAL(1<<i) |
|
|
+ LTQ_ES_ADR_TB_CTL1_REG_FID_VAL(0) |
|
|
+ (mac[0] << 8) |
|
|
+ (mac[1] << 0), adr_tb_ctl1);
|
|
+ /* erase address */
|
|
+
|
|
+ es_w32(LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(1) |
|
|
+ LTQ_ES_ADR_TB_CTL2_REG_AC_VAL(15),
|
|
+ adr_tb_ctl2);
|
|
+
|
|
+ /* wait, while busy */
|
|
+ while (es_r32(adr_tb_st2) &
|
|
+ LTQ_ES_ADR_TB_ST2_REG_BUSY);
|
|
+ }
|
|
+ } while (result == 0);
|
|
+ }
|
|
+ /**************************************
|
|
+ * END: Procedure to clear MAC table
|
|
+ **************************************/
|
|
+
|
|
+ /* reset RMON counters */
|
|
+ es_w32(LTQ_ES_RMON_CTL_REG_BAS | LTQ_ES_RMON_CTL_REG_CAC_VAL(3),
|
|
+ rmon_ctl);
|
|
+
|
|
+ /* bring all registers to reset state */
|
|
+ reg = LTQ_ES_PS_REG;
|
|
+ for (i = 0; i < ARRAY_SIZE(switch_reset_offset_000); i++) {
|
|
+ if ((reg == LTQ_ES_PS_REG) ||
|
|
+ (reg >= LTQ_ES_ADR_TB_CTL0_REG &&
|
|
+ reg <= LTQ_ES_ADR_TB_ST2_REG))
|
|
+ continue;
|
|
+
|
|
+ if (switch_reset_offset_000[i] != 0xFFFFFFFF) {
|
|
+ /* write reset value to register */
|
|
+ *reg = switch_reset_offset_000[i];
|
|
+ /* read register value back */
|
|
+ rdreg = *reg;
|
|
+ if (reg == LTQ_ES_SW_GCTL1_REG)
|
|
+ rdreg &= ~LTQ_ES_SW_GCTL1_REG_BISTDN;
|
|
+ /* compare read value with written one */
|
|
+ if (rdreg != switch_reset_offset_000[i]) {
|
|
+ printk(KERN_ERR "%s,%d: reg %08x mismatch "
|
|
+ "[has:%08x, expect:%08x]\n",
|
|
+ __func__, __LINE__,
|
|
+ (unsigned int)reg, rdreg,
|
|
+ switch_reset_offset_000[i]);
|
|
+ }
|
|
+ }
|
|
+ reg++;
|
|
+ }
|
|
+
|
|
+ reg = LTQ_ES_VLAN_FLT0_REG;
|
|
+ for (i = 0; i < ARRAY_SIZE(switch_reset_offset_100); i++) {
|
|
+ *reg = switch_reset_offset_100[i];
|
|
+ rdreg = *reg;
|
|
+ if (rdreg != switch_reset_offset_100[i]) {
|
|
+ printk(KERN_ERR "%s,%d: reg %08x mismatch "
|
|
+ "[has:%08x, expect:%08x]\n", __func__, __LINE__,
|
|
+ (unsigned int)reg, rdreg,
|
|
+ switch_reset_offset_100[i]);
|
|
+ }
|
|
+ reg++;
|
|
+ }
|
|
+}
|
|
+EXPORT_SYMBOL(switchip_reset);
|
|
+
|
|
+static u32 get_phy_oui(unsigned char phy_addr)
|
|
+{
|
|
+ u32 oui;
|
|
+ int i, bit, byte, shift, w;
|
|
+ u16 reg_id[2];
|
|
+
|
|
+ /* read PHY identifier registers 1 and 2 */
|
|
+ reg_id[0] = switchip_phy_read(phy_addr, 2);
|
|
+ reg_id[1] = switchip_phy_read(phy_addr, 3);
|
|
+
|
|
+ oui = 0;
|
|
+ w = 1;
|
|
+ shift = 7;
|
|
+ byte = 1;
|
|
+ for (i = 0, bit = 10; i <= 21; i++, bit++) {
|
|
+ oui |= ((reg_id[w] & (1<<bit)) ? 1 : 0) << shift;
|
|
+ if (!(shift % 8)) {
|
|
+ byte++;
|
|
+ if (byte == 2)
|
|
+ shift = 15;
|
|
+ else
|
|
+ shift = 21;
|
|
+ } else {
|
|
+ shift--;
|
|
+ }
|
|
+ if (w == 1 && bit == 15) {
|
|
+ bit = -1;
|
|
+ w = 0;
|
|
+ }
|
|
+ }
|
|
+ return oui;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Switch Initialization.
|
|
+ */
|
|
+int switchip_init(void)
|
|
+{
|
|
+ int eth_port, phy_present = 0;
|
|
+ u16 reg, mode;
|
|
+
|
|
+ sys1_w32(SYS1_CLKENR_ETHSW, clkenr);
|
|
+ asm("sync");
|
|
+
|
|
+ /* Enable Switch, if not already done so */
|
|
+ if ((es_r32(sw_gctl0) & LTQ_ES_SW_GCTL0_REG_SE) == 0)
|
|
+ es_w32_mask(0, LTQ_ES_SW_GCTL0_REG_SE, sw_gctl0);
|
|
+ /* Wait for completion of MBIST */
|
|
+ while (LTQ_ES_SW_GCTL1_REG_BISTDN_GET(es_r32(sw_gctl1)) == 0);
|
|
+
|
|
+ switchip_reset();
|
|
+
|
|
+ mode = LTQ_ES_RGMII_CTL_REG_IS_GET(es_r32(rgmii_ctl));
|
|
+ eth_port = (mode == 2 ? 1 : 0);
|
|
+
|
|
+ /* Set the primary port(port toward backplane) as sniffer port,
|
|
+ changing from P2 which is the reset setting */
|
|
+ es_w32_mask(LTQ_ES_SW_GCTL0_REG_SNIFFPN,
|
|
+ LTQ_ES_SW_GCTL0_REG_SNIFFPN_VAL(eth_port),
|
|
+ sw_gctl0);
|
|
+
|
|
+ /* Point MDIO state machine to invalid PHY addresses 8 and 9 */
|
|
+ es_w32_mask(0, LTQ_ES_SW_GCTL0_REG_PHYBA, sw_gctl0);
|
|
+
|
|
+ /* Add CRC for packets from DMA to PMAC.
|
|
+ Remove CRC for packets from PMAC to DMA. */
|
|
+ es_w32(LTQ_ES_PMAC_HD_CTL_RC | LTQ_ES_PMAC_HD_CTL_AC, pmac_hd_ctl);
|
|
+
|
|
+ phy_oui = get_phy_oui(0);
|
|
+ switch (phy_oui) {
|
|
+#ifdef CONFIG_LANTIQ_MACH_EASY336
|
|
+ case PHY_OUI_PMC:
|
|
+ phy_address[0] = (mode == 2 ? -1 : 2);
|
|
+ phy_address[1] = (mode == 2 ? 2 : -1);
|
|
+ break;
|
|
+#endif
|
|
+ case PHY_OUI_VITESSE:
|
|
+ default:
|
|
+ phy_oui = PHY_OUI_DEFAULT;
|
|
+ phy_address[0] = (mode == 2 ? 1 : 0);
|
|
+ phy_address[1] = (mode == 2 ? 0 : 1);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /****** PORT 0 *****/
|
|
+ reg = switchip_phy_read(phy_address[0], 1);
|
|
+ if ((reg != 0x0000) && (reg != 0xffff)) {
|
|
+ /* PHY connected? */
|
|
+ phy_present |= 1;
|
|
+ /* Set Rx- and TxDelay in case of RGMII */
|
|
+ switch (mode) {
|
|
+ case 0: /* *RGMII,RGMII */
|
|
+ case 2: /* RGMII,*GMII */
|
|
+ /* program clock delay in PHY, not in SVIP */
|
|
+
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0RDLY, 0, rgmii_ctl);
|
|
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0TDLY, 0, rgmii_ctl);
|
|
+ if (phy_oui == PHY_OUI_VITESSE ||
|
|
+ phy_oui == PHY_OUI_DEFAULT) {
|
|
+ switchip_phy_write(phy_address[0], 31, 0x0001);
|
|
+ switchip_phy_write(phy_address[0], 28, 0xA000);
|
|
+ switchip_phy_write(phy_address[0], 31, 0x0000);
|
|
+ }
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ if (phy_oui == PHY_OUI_VITESSE ||
|
|
+ phy_oui == PHY_OUI_DEFAULT) {
|
|
+ /* Program PHY advertisements and
|
|
+ * restart auto-negotiation */
|
|
+ switchip_phy_write(phy_address[0], 4, 0x05E1);
|
|
+ switchip_phy_write(phy_address[0], 9, 0x0300);
|
|
+ switchip_phy_write(phy_address[0], 0, 0x3300);
|
|
+ } else {
|
|
+ reg = switchip_phy_read(phy_address[0], 0);
|
|
+ reg |= 0x1000; /* auto-negotiation enable */
|
|
+ switchip_phy_write(phy_address[0], 0, reg);
|
|
+ reg |= 0x0200; /* auto-negotiation restart */
|
|
+ switchip_phy_write(phy_address[0], 0, reg);
|
|
+ }
+ } else {
+ /* Force SWITCH link with highest capability:
+ * 100M FD for MII
+ * 1G FD for GMII/RGMII
+ */
+ switch (mode) {
+ case 1: /* *MII,MII */
+ case 3: /* *MII,RGMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0SPD_VAL(1),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0DUP_VAL(1),
+ rgmii_ctl);
+ break;
+ case 0: /* *RGMII,RGMII */
+ case 2: /* RGMII,*GMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0DUP_VAL(1),
+ rgmii_ctl);
+
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0RDLY, 0, rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0TDLY_VAL(2),
+ rgmii_ctl);
+ break;
+ }
+
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_FLP, p0_ctl);
+ }
+
+ /****** PORT 1 *****/
+ reg = switchip_phy_read(phy_address[1], 1);
+ if ((reg != 0x0000) && (reg != 0xffff)) {
+ /* PHY connected? */
+ phy_present |= 2;
+ /* Set Rx- and TxDelay in case of RGMII */
+ switch (mode) {
+ case 0: /* *RGMII,RGMII */
+ case 3: /* *MII,RGMII */
+ /* program clock delay in PHY, not in SVIP */
+
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1RDLY, 0, rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1TDLY, 0, rgmii_ctl);
+ if (phy_oui == PHY_OUI_VITESSE ||
+ phy_oui == PHY_OUI_DEFAULT) {
+ switchip_phy_write(phy_address[1], 31, 0x0001);
+ switchip_phy_write(phy_address[1], 28, 0xA000);
+ switchip_phy_write(phy_address[1], 31, 0x0000);
+ }
+ break;
+ case 2: /* RGMII,*GMII */
+
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+#ifdef CONFIG_LANTIQ_MACH_EASY336
+ if (phy_oui == PHY_OUI_PMC) {
+ switchip_phy_write(phy_address[1], 24, 0x0510);
+ switchip_phy_write(phy_address[1], 17, 0xA38C);
+ switchip_phy_write(phy_address[1], 17, 0xA384);
+ }
+#endif
+ break;
+ default:
+ break;
+ }
+ /* Program PHY advertisements and restart auto-negotiation */
+ if (phy_oui == PHY_OUI_VITESSE ||
+ phy_oui == PHY_OUI_DEFAULT) {
+ switchip_phy_write(phy_address[1], 4, 0x05E1);
+ switchip_phy_write(phy_address[1], 9, 0x0300);
+ switchip_phy_write(phy_address[1], 0, 0x3300);
+ } else {
+ reg = switchip_phy_read(phy_address[1], 0);
+ reg |= 0x1000; /* auto-negotiation enable */
+ switchip_phy_write(phy_address[1], 0, reg);
+ reg |= 0x0200; /* auto-negotiation restart */
+ switchip_phy_write(phy_address[1], 0, reg);
+ }
+ } else {
+ /* Force SWITCH link with highest capability:
+ * 100M FD for MII
+ * 1G FD for GMII/RGMII
+ */
+ switch (mode) {
+ case 1: /* *MII,MII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(1),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+ break;
+ case 0: /* *RGMII,RGMII */
+ case 3: /* *MII,RGMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1RDLY, 0, rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1TDLY_VAL(2),
+ rgmii_ctl);
+ break;
+ case 2: /* RGMII,*GMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+ break;
+ }
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_FLP, p1_ctl);
+ }
+
+ /*
+ * Allow unknown unicast/multicast and broadcasts
+ * on all ports.
+ */
+
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_UP_VAL(7), sw_gctl1);
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_BP_VAL(7), sw_gctl1);
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_MP_VAL(7), sw_gctl1);
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_RP_VAL(7), sw_gctl1);
+
+ /* Enable LAN port(s) */
+ if (eth_port == 0)
+ es_w32_mask(LTQ_ES_P0_CTL_REG_SPS, 0, p0_ctl);
+ else
+ es_w32_mask(LTQ_ES_P0_CTL_REG_SPS, 0, p1_ctl);
+ /* Enable CPU Port (Forwarding State) */
+ es_w32_mask(LTQ_ES_P0_CTL_REG_SPS, 0, p2_ctl);
+
+ if (phy_present)
+ switchip_mdio_poll_init();
+
+ return 0;
+}
+EXPORT_SYMBOL(switchip_init);
+
+device_initcall(switchip_init);
Index: linux-3.3.8/arch/mips/lantiq/Platform
===================================================================
--- linux-3.3.8.orig/arch/mips/lantiq/Platform 2012-07-31 19:51:32.289105839 +0200
+++ linux-3.3.8/arch/mips/lantiq/Platform 2012-07-31 19:51:34.153105919 +0200
@@ -7,3 +7,4 @@
load-$(CONFIG_LANTIQ) = 0xffffffff80002000
cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
cflags-$(CONFIG_SOC_FALCON) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
+cflags-$(CONFIG_SOC_SVIP) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/svip
Index: linux-3.3.8/arch/mips/lantiq/clk.h
===================================================================
--- linux-3.3.8.orig/arch/mips/lantiq/clk.h 2012-07-31 19:51:33.501105891 +0200
+++ linux-3.3.8/arch/mips/lantiq/clk.h 2012-07-31 19:51:34.153105919 +0200
@@ -56,6 +56,10 @@
extern unsigned long ltq_danube_fpi_hz(void);
extern unsigned long ltq_danube_io_region_clock(void);

+extern unsigned long ltq_svip_cpu_hz(void);
+extern unsigned long ltq_svip_fpi_hz(void);
+extern unsigned long ltq_svip_io_region_clock(void);
+
extern unsigned long ltq_ar9_cpu_hz(void);
extern unsigned long ltq_ar9_fpi_hz(void);

Index: linux-3.3.8/drivers/net/ethernet/Kconfig
===================================================================
--- linux-3.3.8.orig/drivers/net/ethernet/Kconfig 2012-07-31 19:51:33.105105873 +0200
+++ linux-3.3.8/drivers/net/ethernet/Kconfig 2012-07-31 19:51:34.153105919 +0200
@@ -91,6 +91,18 @@
---help---
Support for the MII0 inside the Lantiq SoC

+config LANTIQ_SVIP_ETH
+ default y
+ tristate "Lantiq SoC SVIP Ethernet driver"
+ depends on SOC_SVIP
+ help
+ Support for the MII0 inside the Lantiq SVIP SoC
+
+config LANTIQ_SVIP_VIRTUAL_ETH
+ default y
+ tristate "Lantiq SoC SVIP Virtual Ethernet driver"
+ depends on SOC_SVIP
+
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
Index: linux-3.3.8/drivers/net/ethernet/Makefile
===================================================================
--- linux-3.3.8.orig/drivers/net/ethernet/Makefile 2012-07-31 19:51:33.105105873 +0200
+++ linux-3.3.8/drivers/net/ethernet/Makefile 2012-07-31 19:51:34.153105919 +0200
@@ -37,6 +37,8 @@
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
obj-$(CONFIG_LANTIQ_VRX200) += lantiq_vrx200.o
+obj-$(CONFIG_LANTIQ_SVIP_ETH) += svip_eth.o
+obj-$(CONFIG_LANTIQ_SVIP_VIRTUAL_ETH) += svip_virtual_eth.o
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/