ixp4xx: add Mikael Pettersson's patch (works for 2.6.33 & 2.6.35)

SVN-Revision: 21879
Author: Alexandros C. Couloumbis
Date: 2010-06-22 14:10:55 +00:00
Parent: a9783bd1c1
Commit: f2bf29dcfb
7 changed files with 134 additions and 19 deletions

@@ -0,0 +1,12 @@
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -384,7 +384,8 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
- WARN_ON(irqs_disabled());
+ if (irqs_disabled()) /* don't want stack dumps for these! */
+ printk("WARNING: at %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__);
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;

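For context: in the kernel, WARN_ON() prints a one-line warning plus a full register and stack dump, which is exactly what the hunk above wants to avoid for a condition it treats as harmless ("don't want stack dumps for these!"); the replacement printk() reports only file, line and function. A minimal userspace sketch of the same pattern (irqs_disabled() is stubbed out and all names are illustrative, not part of the patch):

#include <stdio.h>

/* Stand-in for the kernel's irqs_disabled(); always "true" so the demo fires. */
static int irqs_disabled(void)
{
        return 1;
}

static void free_coherent_demo(void)
{
        if (irqs_disabled()) /* don't want stack dumps for these! */
                printf("WARNING: at %s:%d %s()\n", __FILE__, __LINE__, __func__);
}

int main(void)
{
        free_coherent_demo();
        return 0;
}
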
@@ -0,0 +1,33 @@
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -322,12 +322,13 @@ static void __init setup_processor(void)
void cpu_init(void)
{
unsigned int cpu = smp_processor_id();
- struct stack *stk = &stacks[cpu];
+ struct stack *stk;
if (cpu >= NR_CPUS) {
printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
BUG();
}
+ stk = &stacks[cpu];
/*
* Define the placement constraint for the inline asm directive below.
@@ -386,13 +387,14 @@ static struct machine_desc * __init setu
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
- struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ struct membank *bank;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
"ignoring memory at %#lx\n", start);
return -EINVAL;
}
+ bank = &meminfo.bank[meminfo.nr_banks];
/*
* Ensure that start/size are aligned to a page boundary.

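Both setup.c hunks above apply the same check-before-use pattern: the &array[index] address is no longer computed until the index has been range-checked, presumably because forming the address of an out-of-range element is undefined behaviour in C and can provoke compiler warnings or mis-optimisation. A minimal sketch of the pattern in plain C (the slot array, its size and the helper are made-up names for illustration):

#include <stdio.h>

#define NR_SLOTS 4

struct slot {
        int id;
};

static struct slot slots[NR_SLOTS];

static struct slot *get_slot(unsigned int idx)
{
        struct slot *s;

        if (idx >= NR_SLOTS) {          /* validate the index first ... */
                fprintf(stderr, "bad slot index %u\n", idx);
                return NULL;
        }
        s = &slots[idx];                /* ... only then form the pointer */
        return s;
}

int main(void)
{
        return get_slot(2) ? 0 : 1;
}
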
@@ -0,0 +1,20 @@
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -427,6 +427,17 @@ static void __init ixp4xx_clocksource_in
}
/*
+ * sched_clock()
+ */
+unsigned long long sched_clock(void)
+{
+ cycle_t cyc = ixp4xx_get_cycles(NULL);
+ struct clocksource *cs = &clocksource_ixp4xx;
+
+ return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
+}
+
+/*
* clockevents
*/
static int ixp4xx_set_next_event(unsigned long evt,

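The sched_clock() added above only converts raw clocksource cycles into nanoseconds; in the kernel, clocksource_cyc2ns(cyc, mult, shift) boils down to ((u64)cyc * mult) >> shift. A small standalone sketch of that arithmetic (the ~66.66 MHz tick rate is the usual ixp4xx timer frequency, but the mult/shift pair below is an illustrative choice, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the kernel's clocksource_cyc2ns() helper. */
static uint64_t cyc2ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
        return (cyc * mult) >> shift;
}

int main(void)
{
        /* 983040 / 2^16 = 15, i.e. roughly 15 ns per tick of a ~66.66 MHz counter. */
        uint32_t mult = 983040;
        uint32_t shift = 16;

        /* One second worth of ticks comes out as roughly 10^9 ns. */
        printf("%llu ns\n", (unsigned long long)cyc2ns(66660000, mult, shift));
        return 0;
}
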
@@ -0,0 +1,11 @@
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -127,8 +127,6 @@ make_coherent(struct address_space *mapp
flush_dcache_mmap_unlock(mapping);
if (aliases)
adjust_pte(vma, addr);
- else
- flush_cache_page(vma, addr, pfn);
}
/*

@@ -1,8 +1,8 @@
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
-@@ -417,7 +417,6 @@ config ARCH_IXP4XX
+@@ -435,7 +435,6 @@ config ARCH_IXP4XX
 select CPU_XSCALE
 select GENERIC_GPIO
 select GENERIC_TIME
 select GENERIC_CLOCKEVENTS
 - select DMABOUNCE if PCI
 help
@@ -10,26 +10,24 @@
 
 --- a/arch/arm/mach-ixp4xx/Kconfig
 +++ b/arch/arm/mach-ixp4xx/Kconfig
-@@ -199,6 +199,45 @@ config IXP4XX_INDIRECT_PCI
+@@ -199,6 +199,43 @@ config IXP4XX_INDIRECT_PCI
 need to use the indirect method instead. If you don't know
 what you need, leave this option unselected.
 
 +config IXP4XX_LEGACY_DMABOUNCE
-+ bool "legacy PCI DMA bounce support"
++ bool "Legacy PCI DMA bounce support"
 + depends on PCI
 + default n
 + select DMABOUNCE
 + help
 + The IXP4xx is limited to a 64MB window for PCI DMA, which
-+ requires that PCI accesses above 64MB are bounced via buffers
-+ below 64MB. Furthermore the IXP4xx has an erratum where PCI
-+ read prefetches just below the 64MB limit can trigger lockups.
++ requires that PCI accesses >= 64MB are bounced via buffers
++ below 64MB.
 +
-+ The kernel has traditionally handled these two issue by using
-+ ARM specific DMA bounce support code for all accesses >= 64MB.
++ The kernel has traditionally handled this issue by using ARM
++ specific DMA bounce support code for all accesses >= 64MB.
 + That code causes problems of its own, so it is desirable to
-+ disable it. As the kernel now has a workaround for the PCI read
-+ prefetch erratum, it no longer requires the ARM bounce code.
++ disable it.
 +
 + Enabling this option makes IXP4xx continue to use the problematic
 + ARM DMA bounce code. Disabling this option makes IXP4xx use the
@@ -58,7 +56,7 @@
 help
 --- a/arch/arm/mach-ixp4xx/common-pci.c
 +++ b/arch/arm/mach-ixp4xx/common-pci.c
-@@ -321,27 +321,38 @@ static int abort_handler(unsigned long a
+@@ -321,27 +321,33 @@ static int abort_handler(unsigned long a
 */
 static int ixp4xx_pci_platform_notify(struct device *dev)
 {
@@ -88,12 +86,8 @@
 +#ifdef CONFIG_DMABOUNCE
 int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-+ /* Note that this returns true for the last page below 64M due to
-+ * IXP4xx erratum 15 (SCR 1289), which states that PCI prefetches
-+ * can cross the boundary between valid memory and a reserved region
-+ * causing AHB bus errors and a lock-up.
-+ */
- return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
+- return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
++ return (dev->bus == &pci_bus_type ) && ((dma_addr + size) > SZ_64M);
 }
 +#endif
 
@@ -101,7 +95,7 @@
 /*
 * Only first 64MB of memory can be accessed via PCI.
 * We use GFP_DMA to allocate safe buffers to do map/unmap.
-@@ -364,6 +375,7 @@ void __init ixp4xx_adjust_zones(int node
+@@ -364,6 +370,7 @@ void __init ixp4xx_adjust_zones(int node
 zhole_size[1] = zhole_size[0];
 zhole_size[0] = 0;
 }

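The dma_needs_bounce() change in the modified patch above is a boundary tweak: with '>' instead of '>=', a PCI buffer that ends exactly at the 64MB mark is no longer bounced, only one that actually extends past it. (The old version deliberately kept '>=', bouncing the last page below 64MB as well, because of the IXP4xx erratum 15 prefetch issue described in the comment it used to add.) A standalone sketch of the reworked predicate, ignoring the dev->bus == &pci_bus_type part (SZ_64M and the comparison come from the patch; the sample addresses are made up):

#include <stdio.h>
#include <stdint.h>

#define SZ_64M (64u * 1024 * 1024)

/* Mirrors the new test: bounce only if the buffer extends past 64MB. */
static int needs_bounce(uint32_t dma_addr, uint32_t size)
{
        return (dma_addr + size) > SZ_64M;
}

int main(void)
{
        /* Buffer ending exactly on the 64MB boundary: not bounced any more. */
        printf("%d\n", needs_bounce(SZ_64M - 4096, 4096)); /* prints 0 */

        /* Buffer straddling the boundary: still bounced. */
        printf("%d\n", needs_bounce(SZ_64M - 4096, 8192)); /* prints 1 */
        return 0;
}
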
@@ -0,0 +1,12 @@
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -381,7 +381,8 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
- WARN_ON(irqs_disabled());
+ if (irqs_disabled()) /* don't want stack dumps for these! */
+ printk("WARNING: at %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__);
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;

@@ -0,0 +1,33 @@
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -323,12 +323,13 @@ static void __init setup_processor(void)
void cpu_init(void)
{
unsigned int cpu = smp_processor_id();
- struct stack *stk = &stacks[cpu];
+ struct stack *stk;
if (cpu >= NR_CPUS) {
printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
BUG();
}
+ stk = &stacks[cpu];
/*
* Define the placement constraint for the inline asm directive below.
@@ -387,13 +388,14 @@ static struct machine_desc * __init setu
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
- struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ struct membank *bank;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
"ignoring memory at %#lx\n", start);
return -EINVAL;
}
+ bank = &meminfo.bank[meminfo.nr_banks];
/*
* Ensure that start/size are aligned to a page boundary.