kernel: remove linux 3.3 patches and config

Signed-off-by: Felix Fietkau <nbd@openwrt.org>

SVN-Revision: 44109
Author: Felix Fietkau
Date:   2015-01-24 20:02:02 +00:00
Commit: 186db26d05 (parent: ed60bb8360)

179 changed files with 0 additions and 45292 deletions

File diff suppressed because it is too large.

@@ -1,250 +0,0 @@
From 455bd4c430b0c0a361f38e8658a0d6cb469942b5 Mon Sep 17 00:00:00 2001
From: Ivan Djelic <ivan.djelic@parrot.com>
Date: Wed, 6 Mar 2013 20:09:27 +0100
Subject: [PATCH] ARM: 7668/1: fix memset-related crashes caused by recent GCC
(4.7.2) optimizations
Recent GCC versions (e.g. GCC-4.7.2) perform optimizations based on
assumptions about the implementation of memset and similar functions.
The current ARM optimized memset code does not return the value of
its first argument, as is usually expected from standard implementations.
For instance in the following function:
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
{
memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
waiter->magic = waiter;
INIT_LIST_HEAD(&waiter->list);
}
compiled as:
800554d0 <debug_mutex_lock_common>:
800554d0: e92d4008 push {r3, lr}
800554d4: e1a00001 mov r0, r1
800554d8: e3a02010 mov r2, #16 ; 0x10
800554dc: e3a01011 mov r1, #17 ; 0x11
800554e0: eb04426e bl 80165ea0 <memset>
800554e4: e1a03000 mov r3, r0
800554e8: e583000c str r0, [r3, #12]
800554ec: e5830000 str r0, [r3]
800554f0: e5830004 str r0, [r3, #4]
800554f4: e8bd8008 pop {r3, pc}
GCC assumes memset returns the value of pointer 'waiter' in register r0, causing
register/memory corruption.
This patch fixes the return value of the assembly version of memset.
It adds a 'mov' instruction and merges an additional load+store into
existing load/store instructions.
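For illustration, here is a minimal standalone sketch (not part of the patch) of
the contract the compiler relies on: a conforming memset must return its first
argument unchanged, so GCC may legally reuse the return value in r0 in place of
the original pointer.

#include <stddef.h>

/* Reference behaviour assumed by the compiler: the destination pointer
 * is handed back to the caller untouched. */
void *ref_memset(void *s, int c, size_t n)
{
	unsigned char *p = s;

	while (n--)
		*p++ = (unsigned char)c;
	return s;	/* debug_mutex_lock_common above relies on this */
}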
For ease of review, here is a breakdown of the patch into 4 simple steps:
Step 1
======
Perform the following substitutions:
ip -> r8, then
r0 -> ip,
and insert 'mov ip, r0' as the first statement of the function.
At this point, we have a memset() implementation returning the proper result,
but corrupting r8 on some paths (the ones that were using ip).
Step 2
======
Make sure r8 is saved and restored when (! CALGN(1)+0) == 1:
save r8:
- str lr, [sp, #-4]!
+ stmfd sp!, {r8, lr}
and restore r8 on both exit paths:
- ldmeqfd sp!, {pc} @ Now <64 bytes to go.
+ ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
(...)
tst r2, #16
stmneia ip!, {r1, r3, r8, lr}
- ldr lr, [sp], #4
+ ldmfd sp!, {r8, lr}
Step 3
======
Make sure r8 is saved and restored when (! CALGN(1)+0) == 0:
save r8:
- stmfd sp!, {r4-r7, lr}
+ stmfd sp!, {r4-r8, lr}
and restore r8 on both exit paths:
bgt 3b
- ldmeqfd sp!, {r4-r7, pc}
+ ldmeqfd sp!, {r4-r8, pc}
(...)
tst r2, #16
stmneia ip!, {r4-r7}
- ldmfd sp!, {r4-r7, lr}
+ ldmfd sp!, {r4-r8, lr}
Step 4
======
Rewrite register list "r4-r7, r8" as "r4-r8".
Signed-off-by: Ivan Djelic <ivan.djelic@parrot.com>
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Dirk Behme <dirk.behme@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
arch/arm/lib/memset.S | 85 ++++++++++++++++++++++++++-------------------------
1 file changed, 44 insertions(+), 41 deletions(-)
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -19,9 +19,9 @@
1: subs r2, r2, #4 @ 1 do we have enough
blt 5f @ 1 bytes to align with?
cmp r3, #2 @ 1
- strltb r1, [r0], #1 @ 1
- strleb r1, [r0], #1 @ 1
- strb r1, [r0], #1 @ 1
+ strltb r1, [ip], #1 @ 1
+ strleb r1, [ip], #1 @ 1
+ strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
/*
* The pointer is now aligned and the length is adjusted. Try doing the
@@ -29,10 +29,14 @@
*/
ENTRY(memset)
- ands r3, r0, #3 @ 1 unaligned?
+/*
+ * Preserve the contents of r0 for the return value.
+ */
+ mov ip, r0
+ ands r3, ip, #3 @ 1 unaligned?
bne 1b @ 1
/*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
*/
orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
@@ -43,29 +47,28 @@ ENTRY(memset)
#if ! CALGN(1)+0
/*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
*/
- str lr, [sp, #-4]!
- mov ip, r1
+ stmfd sp!, {r8, lr}
+ mov r8, r1
mov lr, r1
2: subs r2, r2, #64
- stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
bgt 2b
- ldmeqfd sp!, {pc} @ Now <64 bytes to go.
+ ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
- stmneia r0!, {r1, r3, ip, lr}
- stmneia r0!, {r1, r3, ip, lr}
+ stmneia ip!, {r1, r3, r8, lr}
+ stmneia ip!, {r1, r3, r8, lr}
tst r2, #16
- stmneia r0!, {r1, r3, ip, lr}
- ldr lr, [sp], #4
+ stmneia ip!, {r1, r3, r8, lr}
+ ldmfd sp!, {r8, lr}
#else
@@ -74,54 +77,54 @@ ENTRY(memset)
* whole cache lines at once.
*/
- stmfd sp!, {r4-r7, lr}
+ stmfd sp!, {r4-r8, lr}
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
- mov ip, r1
+ mov r8, r1
mov lr, r1
cmp r2, #96
- tstgt r0, #31
+ tstgt ip, #31
ble 3f
- and ip, r0, #31
- rsb ip, ip, #32
- sub r2, r2, ip
- movs ip, ip, lsl #(32 - 4)
- stmcsia r0!, {r4, r5, r6, r7}
- stmmiia r0!, {r4, r5}
- tst ip, #(1 << 30)
- mov ip, r1
- strne r1, [r0], #4
+ and r8, ip, #31
+ rsb r8, r8, #32
+ sub r2, r2, r8
+ movs r8, r8, lsl #(32 - 4)
+ stmcsia ip!, {r4, r5, r6, r7}
+ stmmiia ip!, {r4, r5}
+ tst r8, #(1 << 30)
+ mov r8, r1
+ strne r1, [ip], #4
3: subs r2, r2, #64
- stmgeia r0!, {r1, r3-r7, ip, lr}
- stmgeia r0!, {r1, r3-r7, ip, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
bgt 3b
- ldmeqfd sp!, {r4-r7, pc}
+ ldmeqfd sp!, {r4-r8, pc}
tst r2, #32
- stmneia r0!, {r1, r3-r7, ip, lr}
+ stmneia ip!, {r1, r3-r8, lr}
tst r2, #16
- stmneia r0!, {r4-r7}
- ldmfd sp!, {r4-r7, lr}
+ stmneia ip!, {r4-r7}
+ ldmfd sp!, {r4-r8, lr}
#endif
4: tst r2, #8
- stmneia r0!, {r1, r3}
+ stmneia ip!, {r1, r3}
tst r2, #4
- strne r1, [r0], #4
+ strne r1, [ip], #4
/*
* When we get here, we've got less than 4 bytes to zero. We
* may have an unaligned pointer as well.
*/
5: tst r2, #2
- strneb r1, [r0], #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
+ strneb r1, [ip], #1
tst r2, #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
mov pc, lr
ENDPROC(memset)


@@ -1,78 +0,0 @@
From 418df63adac56841ef6b0f1fcf435bc64d4ed177 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre <nicolas.pitre@linaro.org>
Date: Tue, 12 Mar 2013 13:00:42 +0100
Subject: [PATCH] ARM: 7670/1: fix the memset fix
Commit 455bd4c430b0 ("ARM: 7668/1: fix memset-related crashes caused by
recent GCC (4.7.2) optimizations") attempted to fix a compliance issue
with the memset return value. However the memset itself became broken
by that patch for misaligned pointers.
This fixes the above by branching over the entry code from the
misaligned fixup code to avoid reloading the original pointer.
Also, because the function entry alignment is wrong in the Thumb mode
compilation, that fixup code is moved to the end.
While at it, the entry instructions are slightly reworked to help dual
issue pipelines.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Alexander Holler <holler@ahsoftware.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
arch/arm/lib/memset.S | 33 +++++++++++++--------------------
1 file changed, 13 insertions(+), 20 deletions(-)
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,31 +14,15 @@
.text
.align 5
- .word 0
-
-1: subs r2, r2, #4 @ 1 do we have enough
- blt 5f @ 1 bytes to align with?
- cmp r3, #2 @ 1
- strltb r1, [ip], #1 @ 1
- strleb r1, [ip], #1 @ 1
- strb r1, [ip], #1 @ 1
- add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
ENTRY(memset)
-/*
- * Preserve the contents of r0 for the return value.
- */
- mov ip, r0
- ands r3, ip, #3 @ 1 unaligned?
- bne 1b @ 1
+ ands r3, r0, #3 @ 1 unaligned?
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
/*
* we know that the pointer in ip is aligned to a word boundary.
*/
- orr r1, r1, r1, lsl #8
+1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
@@ -127,4 +111,13 @@ ENTRY(memset)
tst r2, #1
strneb r1, [ip], #1
mov pc, lr
+
+6: subs r2, r2, #4 @ 1 do we have enough
+ blt 5b @ 1 bytes to align with?
+ cmp r3, #2 @ 1
+ strltb r1, [ip], #1 @ 1
+ strleb r1, [ip], #1 @ 1
+ strb r1, [ip], #1 @ 1
+ add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
+ b 1b
ENDPROC(memset)


@@ -1,249 +0,0 @@
From c022630633624a75b3b58f43dd3c6cc896a56cff Mon Sep 17 00:00:00 2001
From: "Steven J. Hill" <sjhill@mips.com>
Date: Fri, 6 Jul 2012 21:56:01 +0200
Subject: [PATCH] MIPS: Refactor 'clear_page' and 'copy_page' functions.
Remove usage of the '__attribute__((alias("...")))' hack that aliased
to integer arrays containing micro-assembled instructions. This hack
breaks when building a microMIPS kernel. It also makes the code much
easier to understand.
[ralf@linux-mips.org: Added back export of the clear_page and copy_page
symbols so certain modules will work again. Also fixed build with
CONFIG_SIBYTE_DMA_PAGEOPS enabled.]
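For context, the removed hack follows this pattern (a minimal illustration, not
the exact kernel code; the real arrays appear in the deleted hunks below):

#include <linux/types.h>

/* A function symbol aliased to a data array that is filled with
 * micro-assembled instructions at boot. microMIPS builds break on this
 * because function symbols carry ISA-mode information that plain data
 * symbols do not. */
static u32 clear_page_buf[0x120 / 4];
void clear_page(void *page) __attribute__((alias("clear_page_buf")));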
Signed-off-by: Steven J. Hill <sjhill@mips.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/3866/
Acked-by: David Daney <david.daney@cavium.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
arch/mips/kernel/mips_ksyms.c | 8 ++++-
arch/mips/mm/Makefile | 4 +--
arch/mips/mm/page-funcs.S | 50 ++++++++++++++++++++++++++++++
arch/mips/mm/page.c | 67 +++++++++++------------------------------
4 files changed, 77 insertions(+), 52 deletions(-)
create mode 100644 arch/mips/mm/page-funcs.S
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
* Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
*/
#include <linux/interrupt.h>
@@ -35,6 +35,12 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(kernel_thread);
/*
+ * Functions that operate on entire pages. Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,8 +3,8 @@
#
obj-y += cache.o dma-default.o extable.o fault.o \
- gup.o init.o mmap.o page.o tlbex.o \
- tlbex-fault.o uasm.o
+ gup.o init.o mmap.o page.o page-funcs.o \
+ tlbex.o tlbex-fault.o uasm.o
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
--- /dev/null
+++ b/arch/mips/mm/page-funcs.S
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated clear_page/copy_page functions.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#define cpu_clear_page_function_name clear_page_cpu
+#define cpu_copy_page_function_name copy_page_cpu
+#else
+#define cpu_clear_page_function_name clear_page
+#define cpu_copy_page_function_name copy_page
+#endif
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x058 bytes
+ * R4600 v1.7: 0x05c bytes
+ * R4600 v2.0: 0x060 bytes
+ * With prefetching, 16 word strides 0x120 bytes
+ */
+EXPORT(__clear_page_start)
+LEAF(cpu_clear_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 288
+END(cpu_clear_page_function_name)
+EXPORT(__clear_page_end)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x11c bytes
+ * R4600 v1.7: 0x080 bytes
+ * R4600 v2.0: 0x07c bytes
+ * With prefetching, 16 word strides 0x540 bytes
+ */
+EXPORT(__copy_page_start)
+LEAF(cpu_copy_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 1344
+END(cpu_copy_page_function_name)
+EXPORT(__copy_page_end)
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -6,6 +6,7 @@
* Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2007 Maciej W. Rozycki
* Copyright (C) 2008 Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -72,45 +73,6 @@ static struct uasm_reloc __cpuinitdata r
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x058 bytes
- * R4600 v1.7: 0x05c bytes
- * R4600 v2.0: 0x060 bytes
- * With prefetching, 16 word strides 0x120 bytes
- */
-
-static u32 clear_page_array[0x120 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
-#else
-void clear_page(void *page) __attribute__((alias("clear_page_array")));
-#endif
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x11c bytes
- * R4600 v1.7: 0x080 bytes
- * R4600 v2.0: 0x07c bytes
- * With prefetching, 16 word strides 0x540 bytes
- */
-static u32 copy_page_array[0x540 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
-#else
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-#endif
-
-EXPORT_SYMBOL(copy_page);
-
-
static int pref_bias_clear_store __cpuinitdata;
static int pref_bias_copy_load __cpuinitdata;
static int pref_bias_copy_store __cpuinitdata;
@@ -283,10 +245,15 @@ static inline void __cpuinit build_clear
}
}
+extern u32 __clear_page_start;
+extern u32 __clear_page_end;
+extern u32 __copy_page_start;
+extern u32 __copy_page_end;
+
void __cpuinit build_clear_page(void)
{
int off;
- u32 *buf = (u32 *)&clear_page_array;
+ u32 *buf = &__clear_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
@@ -357,17 +324,17 @@ void __cpuinit build_clear_page(void)
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
- BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+ BUG_ON(buf > &__clear_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized clear page handler (%u instructions).\n",
- (u32)(buf - clear_page_array));
+ (u32)(buf - &__clear_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - clear_page_array); i++)
- pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+ for (i = 0; i < (buf - &__clear_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
pr_debug("\t.set pop\n");
}
@@ -428,7 +395,7 @@ static inline void build_copy_store_pref
void __cpuinit build_copy_page(void)
{
int off;
- u32 *buf = (u32 *)&copy_page_array;
+ u32 *buf = &__copy_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
@@ -596,21 +563,23 @@ void __cpuinit build_copy_page(void)
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
- BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+ BUG_ON(buf > &__copy_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized copy page handler (%u instructions).\n",
- (u32)(buf - copy_page_array));
+ (u32)(buf - &__copy_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - copy_page_array); i++)
- pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+ for (i = 0; i < (buf - &__copy_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
pr_debug("\t.set pop\n");
}
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+extern void clear_page_cpu(void *page);
+extern void copy_page_cpu(void *to, void *from);
/*
* Pad descriptors to cacheline, since each is exclusively owned by a


@@ -1,96 +0,0 @@
From 2d303b4683145f7dbc918bd14d04e1396581b2ce Mon Sep 17 00:00:00 2001
From: Imre Kaloz <kaloz@openwrt.org>
Date: Thu, 7 Jul 2011 12:05:21 +0200
Subject: [PATCH] ARM: support XZ compressed kernels
Wire up support for the XZ decompressor
Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
---
arch/arm/Kconfig | 1 +
arch/arm/boot/compressed/Makefile | 11 +++++++++--
arch/arm/boot/compressed/decompress.c | 4 ++++
arch/arm/boot/compressed/piggy.xzkern.S | 6 ++++++
lib/xz/xz_dec_stream.c | 1 +
5 files changed, 21 insertions(+), 2 deletions(-)
create mode 100644 arch/arm/boot/compressed/piggy.xzkern.S
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -21,6 +21,7 @@ config ARM
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_XZ
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -92,6 +92,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/
suffix_$(CONFIG_KERNEL_GZIP) = gzip
suffix_$(CONFIG_KERNEL_LZO) = lzo
suffix_$(CONFIG_KERNEL_LZMA) = lzma
+suffix_$(CONFIG_KERNEL_XZ) = xzkern
# Borrowed libfdt files for the ATAG compatibility mode
@@ -115,7 +116,7 @@ targets := vmlinux vmlinux.lds \
lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS)
# Make sure files are removed during clean
-extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs)
+extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -171,8 +172,14 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CO
false; \
fi
+# For __aeabi_llsl
+ashldi3 = $(obj)/ashldi3.o
+
+$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S FORCE
+ $(call cmd,shipped)
+
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
- $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
+ $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE
@$(check_for_multiple_zreladdr)
$(call if_changed,ld)
@$(check_for_bad_syms)
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -44,6 +44,12 @@ extern void error(char *);
#include "../../../../lib/decompress_unlzma.c"
#endif
+#ifdef CONFIG_KERNEL_XZ
+#define memmove memmove
+#define memcpy memcpy
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
{
return decompress(input, len, NULL, NULL, output, NULL, error);
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.xzkern.S
@@ -0,0 +1,6 @@
+ .section .piggydata,#alloc
+ .globl input_data
+input_data:
+ .incbin "arch/arm/boot/compressed/piggy.xzkern"
+ .globl input_data_end
+input_data_end:
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -9,6 +9,7 @@
#include "xz_private.h"
#include "xz_stream.h"
+#include <linux/kernel.h>
/* Hash used to validate the Index field */
struct xz_dec_hash {


@@ -1,40 +0,0 @@
From 35e57e1b49a351aa804dab6010cd46ae6112a541 Mon Sep 17 00:00:00 2001
From: Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
Date: Wed, 21 Aug 2013 01:43:07 -0700
Subject: [PATCH 1/2] hso: Earlier catch of error condition
There is no need to get an interface specification if we know it's the
wrong one.
Signed-off-by: Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/usb/hso.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2927,6 +2927,11 @@ static int hso_probe(struct usb_interfac
struct hso_shared_int *shared_int;
struct hso_device *tmp_dev = NULL;
+ if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
+ dev_err(&interface->dev, "Not our interface\n");
+ return -ENODEV;
+ }
+
if_num = interface->altsetting->desc.bInterfaceNumber;
/* Get the interface/port specification from either driver_info or from
@@ -2936,10 +2941,6 @@ static int hso_probe(struct usb_interfac
else
port_spec = hso_get_config_data(interface);
- if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
- dev_err(&interface->dev, "Not our interface\n");
- return -ENODEV;
- }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
if (interface->num_altsetting > 1)


@@ -1,48 +0,0 @@
From e75dc677ff8d06ffa61bef6fee436227ae5440c6 Mon Sep 17 00:00:00 2001
From: Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
Date: Wed, 21 Aug 2013 01:43:19 -0700
Subject: [PATCH 2/2] hso: Fix stack corruption on some architectures
As Sergei Shtylyov explained in the #mipslinux IRC channel:
[Mon 2013-08-19 12:28:21 PM PDT] <headless> guys, are you sure it's not "DMA off stack" case?
[Mon 2013-08-19 12:28:35 PM PDT] <headless> it's a known stack corruptor on non-coherent arches
[Mon 2013-08-19 12:31:48 PM PDT] <DonkeyHotei> headless: for usb/ehci?
[Mon 2013-08-19 12:34:11 PM PDT] <DonkeyHotei> headless: explain
[Mon 2013-08-19 12:35:38 PM PDT] <headless> usb_control_msg() (or other such func) should not use buffer on stack. DMA from/to stack is prohibited
[Mon 2013-08-19 12:35:58 PM PDT] <headless> and EHCI uses DMA on control xfers (as well as all the others)
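The resulting rule of thumb, sketched here with a hypothetical helper (the
actual fix is in the hunks below): buffers handed to usb_control_msg() must be
heap-allocated so they are DMA-able.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

/* Hypothetical helper: never point a USB control transfer at the stack;
 * kmalloc() memory is DMA-able, stack memory on non-coherent
 * architectures is not. */
static int read_vendor_config(struct usb_device *udev, u8 *out, int len)
{
	u8 *buf = kmalloc(len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x86, 0xC0, 0, 0, buf, len,
			      USB_CTRL_SET_TIMEOUT);
	if (ret == len)
		memcpy(out, buf, len);
	kfree(buf);
	return ret == len ? 0 : -EIO;
}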
Signed-off-by: Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/usb/hso.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2857,13 +2857,16 @@ exit:
static int hso_get_config_data(struct usb_interface *interface)
{
struct usb_device *usbdev = interface_to_usbdev(interface);
- u8 config_data[17];
+ u8 *config_data = kmalloc(17, GFP_KERNEL);
u32 if_num = interface->altsetting->desc.bInterfaceNumber;
s32 result;
+ if (!config_data)
+ return -ENOMEM;
if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
0x86, 0xC0, 0, 0, config_data, 17,
USB_CTRL_SET_TIMEOUT) != 0x11) {
+ kfree(config_data);
return -EIO;
}
@@ -2914,6 +2917,7 @@ static int hso_get_config_data(struct us
if (config_data[16] & 0x1)
result |= HSO_INFO_CRC_BUG;
+ kfree(config_data);
return result;
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -1,757 +0,0 @@
From a93fd80d261f1dc2788442dba8dd5701363d3d6e Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Thu, 10 May 2012 07:51:25 +0000
Subject: [PATCH] codel: Controlled Delay AQM
commit 76e3cc126bb223013a6b9a0e2a51238d1ef2e409 upstream.
An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
http://queue.acm.org/detail.cfm?id=2209336
The main input to this AQM is no longer queue size in bytes or packets, but the
delay packets spend in the (FIFO) queue.
As we don't have infinite memory, we can still drop packets in enqueue()
in case of massive load, but the point of CoDel is to drop packets in
dequeue(), using a control law based on two simple parameters:
target : target sojourn time (default 5ms)
interval : width of moving time window (default 100ms)
Based on initial work from Dave Taht.
Refactored to help future codel inclusion as a plugin for other linux
qdisc (FQ_CODEL, ...), like RED.
include/net/codel.h contains the codel algorithm, kept as close as possible
to Kathleen's reference.
net/sched/sch_codel.c contains the linux qdisc specific glue.
Separate structures permit a memory efficient implementation of fq_codel
(to be sent as separate work): each flow has its own struct codel_vars.
Timestamps are taken at enqueue() time with 1024 ns precision, allowing
a range of 2199 seconds in queue and support for 100Gb links. iproute2
uses usec as its base unit.
Selected packets are dropped, unless ECN is enabled and packets can get
ECN mark instead.
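As a worked example of the 1024 ns clock (a standalone userspace sketch, not
kernel code), the default parameters convert as follows:

#include <stdint.h>
#include <stdio.h>

#define CODEL_SHIFT 10			/* one unit = 1024 ns */
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	uint32_t target = (5 * NSEC_PER_MSEC) >> CODEL_SHIFT;	  /* ~4882 */
	uint32_t interval = (100 * NSEC_PER_MSEC) >> CODEL_SHIFT; /* ~97656 */

	/* signed compares on u32 wrap at 2^31 * 1024 ns, i.e. ~2199 s */
	printf("target=%u interval=%u (1024 ns units)\n", target, interval);
	return 0;
}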
Tested from 2Mb to 10Gb speeds with no particular problems, on ixgbe and
tg3 drivers (BQL enabled).
Usage: tc qdisc ... codel [ limit PACKETS ] [ target TIME ]
[ interval TIME ] [ ecn ]
qdisc codel 10: parent 1:1 limit 2000p target 3.0ms interval 60.0ms ecn
Sent 13347099587 bytes 8815805 pkt (dropped 0, overlimits 0 requeues 0)
rate 202365Kbit 16708pps backlog 113550b 75p requeues 0
count 116 lastcount 98 ldelay 4.3ms dropping drop_next 816us
maxpacket 1514 ecn_mark 84399 drop_overlimit 0
CoDel must be seen as a base module, and should be used keeping in mind
there is still a FIFO queue. So a typical setup will probably need a
hierarchy of several qdiscs and packet classifiers to be able to meet
whatever constraints a user might have.
One possible example would be to use fq_codel, which combines Fair
Queueing and CoDel, in replacement of sfq / sfq_red.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Cc: Tom Herbert <therbert@google.com>
Cc: Matt Mathis <mattmathis@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/linux/pkt_sched.h | 26 ++++
include/net/codel.h | 332 +++++++++++++++++++++++++++++++++++++++++++++
net/sched/Kconfig | 11 ++
net/sched/Makefile | 1 +
net/sched/sch_codel.c | 275 +++++++++++++++++++++++++++++++++++++
5 files changed, 645 insertions(+)
create mode 100644 include/net/codel.h
create mode 100644 net/sched/sch_codel.c
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -633,4 +633,30 @@ struct tc_qfq_stats {
__u32 lmax;
};
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 count; /* how many drops we've done since the last time we
+ * entered dropping state
+ */
+ __u32 lastcount; /* count at entry to dropping state */
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
+ __s32 drop_next; /* time to drop next packet */
+ __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
+ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
+ __u32 dropping; /* are we in dropping state ? */
+};
+
#endif
--- /dev/null
+++ b/include/net/codel.h
@@ -0,0 +1,332 @@
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+
+/* CoDel uses a 1024 nsec clock, encoded in u32
+ * This gives a range of 2199 seconds, because of signed compares
+ */
+typedef u32 codel_time_t;
+typedef s32 codel_tdiff_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+static inline codel_time_t codel_get_time(void)
+{
+ u64 ns = ktime_to_ns(ktime_get());
+
+ return ns >> CODEL_SHIFT;
+}
+
+#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0)
+#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0)
+#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0)
+#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0)
+
+/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static inline u32 codel_time_to_us(codel_time_t val)
+{
+ u64 valns = ((u64)val << CODEL_SHIFT);
+
+ do_div(valns, NSEC_PER_USEC);
+ return (u32)valns;
+}
+
+/**
+ * struct codel_params - contains codel parameters
+ * @target: target queue size (in time units)
+ * @interval: width of moving time window
+ * @ecn: is Explicit Congestion Notification enabled
+ */
+struct codel_params {
+ codel_time_t target;
+ codel_time_t interval;
+ bool ecn;
+};
+
+/**
+ * struct codel_vars - contains codel variables
+ * @count: how many drops we've done since the last time we
+ * entered dropping state
+ * @lastcount: count at entry to dropping state
+ * @dropping: set to true if in dropping state
+ * @first_above_time: when we went (or will go) continuously above target
+ * for interval
+ * @drop_next: time to drop next packet, or when we dropped last
+ * @ldelay: sojourn time of last dequeued packet
+ */
+struct codel_vars {
+ u32 count;
+ u32 lastcount;
+ bool dropping;
+ codel_time_t first_above_time;
+ codel_time_t drop_next;
+ codel_time_t ldelay;
+};
+
+/**
+ * struct codel_stats - contains codel shared variables and stats
+ * @maxpacket: largest packet we've seen so far
+ * @drop_count: temp count of dropped packets in dequeue()
+ * ecn_mark: number of packets we ECN marked instead of dropping
+ */
+struct codel_stats {
+ u32 maxpacket;
+ u32 drop_count;
+ u32 ecn_mark;
+};
+
+static void codel_params_init(struct codel_params *params)
+{
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ vars->drop_next = 0;
+ vars->first_above_time = 0;
+ vars->dropping = false; /* exit dropping state */
+ vars->count = 0;
+ vars->lastcount = 0;
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+ stats->maxpacket = 256;
+}
+
+/* return interval/sqrt(x) with good precision
+ * relies on int_sqrt(unsigned long x) kernel implementation
+ */
+static u32 codel_inv_sqrt(u32 _interval, u32 _x)
+{
+ u64 interval = _interval;
+ unsigned long x = _x;
+
+ /* Scale operands for max precision */
+
+#if BITS_PER_LONG == 64
+ x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
+ interval <<= 16;
+#endif
+
+ while (x < (1UL << (BITS_PER_LONG - 2))) {
+ x <<= 2;
+ interval <<= 1;
+ }
+ do_div(interval, int_sqrt(x));
+ return (u32)interval;
+}
+
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 count)
+{
+ return t + codel_inv_sqrt(interval, count);
+}
+
+
+static bool codel_should_drop(struct sk_buff *skb,
+ unsigned int *backlog,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+ codel_time_t now)
+{
+ bool ok_to_drop;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ vars->ldelay = now - codel_get_enqueue_time(skb);
+ *backlog -= qdisc_pkt_len(skb);
+
+ if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
+ stats->maxpacket = qdisc_pkt_len(skb);
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+ *backlog <= stats->maxpacket) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ ok_to_drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ ok_to_drop = true;
+ }
+ return ok_to_drop;
+}
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+ struct Qdisc *sch);
+
+static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_dequeue_t dequeue_func,
+ u32 *backlog)
+{
+ struct sk_buff *skb = dequeue_func(vars, sch);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+ if (++vars->count == 0) /* avoid zero divides */
+ vars->count = ~0U;
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ goto end;
+ }
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+ skb = dequeue_func(vars, sch);
+ if (!codel_should_drop(skb, backlog,
+ vars, params, stats, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ }
+ }
+ }
+ } else if (drop) {
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, sch);
+ drop = codel_should_drop(skb, backlog, vars, params,
+ stats, now);
+ }
+ vars->dropping = true;
+ /* if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ if (codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = (vars->count - vars->lastcount) | 1;
+ } else {
+ vars->count = 1;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->count);
+ }
+end:
+ return skb;
+}
+#endif
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,17 @@ config NET_SCH_QFQ
If unsure, say N.
+config NET_SCH_CODEL
+ tristate "Controlled Delay AQM (CODEL)"
+ help
+ Say Y here if you want to use the Controlled Delay (CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,275 @@
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *
+ * Implemented on linux by :
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+ struct codel_params params;
+ struct codel_vars vars;
+ struct codel_stats stats;
+ u32 drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue. Note: backlog is handled in
+ * codel, we dont need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ return skb;
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
+ dequeue, &sch->qstats.backlog);
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->stats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ q->stats.drop_count = 0;
+ }
+ if (skb)
+ qdisc_bstats_update(sch, skb);
+ return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct codel_sched_data *q;
+
+ if (likely(qdisc_qlen(sch) < sch->limit)) {
+ codel_set_enqueue_time(skb);
+ return qdisc_enqueue_tail(skb, sch);
+ }
+ q = qdisc_priv(sch);
+ q->drop_overlimit++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_CODEL_ECN] = { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ if (tb[TCA_CODEL_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+ q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_INTERVAL]) {
+ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+ q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+ if (tb[TCA_CODEL_ECN])
+ q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ sch->limit = DEFAULT_CODEL_LIMIT;
+
+ codel_params_init(&q->params);
+ codel_vars_init(&q->vars);
+ codel_stats_init(&q->stats);
+
+ if (opt) {
+ int err = codel_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CODEL_TARGET,
+ codel_time_to_us(q->params.target)) ||
+ nla_put_u32(skb, TCA_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_CODEL_INTERVAL,
+ codel_time_to_us(q->params.interval)) ||
+ nla_put_u32(skb, TCA_CODEL_ECN,
+ q->params.ecn))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ const struct codel_sched_data *q = qdisc_priv(sch);
+ struct tc_codel_xstats st = {
+ .maxpacket = q->stats.maxpacket,
+ .count = q->vars.count,
+ .lastcount = q->vars.lastcount,
+ .drop_overlimit = q->drop_overlimit,
+ .ldelay = codel_time_to_us(q->vars.ldelay),
+ .dropping = q->vars.dropping,
+ .ecn_mark = q->stats.ecn_mark,
+ };
+
+ if (q->vars.dropping) {
+ codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
+
+ if (delta >= 0)
+ st.drop_next = codel_time_to_us(delta);
+ else
+ st.drop_next = -codel_time_to_us(-delta);
+ }
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+ .id = "codel",
+ .priv_size = sizeof(struct codel_sched_data),
+
+ .enqueue = codel_qdisc_enqueue,
+ .dequeue = codel_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = codel_init,
+ .reset = codel_reset,
+ .change = codel_change,
+ .dump = codel_dump,
+ .dump_stats = codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+ return register_qdisc(&codel_qdisc_ops);
+}
+
+static void __exit codel_module_exit(void)
+{
+ unregister_qdisc(&codel_qdisc_ops);
+}
+
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("Dual BSD/GPL");


@@ -1,185 +0,0 @@
From 4a8056dfeef49b306ad6af24a5563d7d6867aae0 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Sat, 12 May 2012 03:32:13 +0000
Subject: [PATCH] codel: use Newton method instead of sqrt() and divides
commit 536edd67109df5e0cdb2c4ee759e9bade7976367 upstream.
As Van pointed out, interval/sqrt(count) can be implemented using
multiplies only.
http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
This patch implements the Newton method and reciprocal divide.
Total cost is 15 cycles instead of 120 on my Corei5 machine (64bit
kernel).
There is a small 'error' for count values < 5, but we don't really care.
I reuse a hole in struct codel_vars:
- pack the dropping boolean into one bit
- use 31bit to store the reciprocal value of sqrt(count).
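A standalone sketch of the iteration (hypothetical test harness, mirroring the
formula above in 0.31 fixed point, not the kernel code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) */
static uint32_t newton_step(uint32_t invsqrt, uint32_t count)
{
	uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 31;
	uint64_t val = (3ULL << 31) - ((uint64_t)count * invsqrt2);

	return (uint32_t)((val * invsqrt) >> 32); /* >> 32 (not 31) is the /2 */
}

int main(void)
{
	uint32_t inv = 0x7fffffff;	/* ~1.0 in 0.31 fixed point */
	uint32_t count;

	/* one step per count increment, as in codel_dequeue(); the
	 * approximation visibly lags 1/sqrt(count) for count < 5 */
	for (count = 1; count <= 16; count++) {
		inv = newton_step(inv, count);
		printf("count=%2u approx=%.4f exact=%.4f\n", count,
		       inv / (double)(1U << 31), 1.0 / sqrt((double)count));
	}
	return 0;
}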
Suggested-by: Van Jacobson <van@pollere.net>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Matt Mathis <mattmathis@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Nandita Dukkipati <nanditad@google.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/codel.h | 68 ++++++++++++++++++++++++++++-----------------------
1 file changed, 37 insertions(+), 31 deletions(-)
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -46,6 +46,7 @@
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
+#include <linux/reciprocal_div.h>
/* Controlling Queue Delay (CoDel) algorithm
* =========================================
@@ -123,6 +124,7 @@ struct codel_params {
* entered dropping state
* @lastcount: count at entry to dropping state
* @dropping: set to true if in dropping state
+ * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
* @first_above_time: when we went (or will go) continuously above target
* for interval
* @drop_next: time to drop next packet, or when we dropped last
@@ -131,7 +133,8 @@ struct codel_params {
struct codel_vars {
u32 count;
u32 lastcount;
- bool dropping;
+ bool dropping:1;
+ u32 rec_inv_sqrt:31;
codel_time_t first_above_time;
codel_time_t drop_next;
codel_time_t ldelay;
@@ -158,11 +161,7 @@ static void codel_params_init(struct cod
static void codel_vars_init(struct codel_vars *vars)
{
- vars->drop_next = 0;
- vars->first_above_time = 0;
- vars->dropping = false; /* exit dropping state */
- vars->count = 0;
- vars->lastcount = 0;
+ memset(vars, 0, sizeof(*vars));
}
static void codel_stats_init(struct codel_stats *stats)
@@ -170,38 +169,37 @@ static void codel_stats_init(struct code
stats->maxpacket = 256;
}
-/* return interval/sqrt(x) with good precision
- * relies on int_sqrt(unsigned long x) kernel implementation
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0, 31bit mantissa)
*/
-static u32 codel_inv_sqrt(u32 _interval, u32 _x)
+static void codel_Newton_step(struct codel_vars *vars)
{
- u64 interval = _interval;
- unsigned long x = _x;
-
- /* Scale operands for max precision */
+ u32 invsqrt = vars->rec_inv_sqrt;
+ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 31;
+ u64 val = (3LL << 31) - ((u64)vars->count * invsqrt2);
-#if BITS_PER_LONG == 64
- x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
- interval <<= 16;
-#endif
+ val = (val * invsqrt) >> 32;
- while (x < (1UL << (BITS_PER_LONG - 2))) {
- x <<= 2;
- interval <<= 1;
- }
- do_div(interval, int_sqrt(x));
- return (u32)interval;
+ vars->rec_inv_sqrt = val;
}
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
static codel_time_t codel_control_law(codel_time_t t,
codel_time_t interval,
- u32 count)
+ u32 rec_inv_sqrt)
{
- return t + codel_inv_sqrt(interval, count);
+ return t + reciprocal_divide(interval, rec_inv_sqrt << 1);
}
-static bool codel_should_drop(struct sk_buff *skb,
+static bool codel_should_drop(const struct sk_buff *skb,
unsigned int *backlog,
struct codel_vars *vars,
struct codel_params *params,
@@ -274,14 +272,16 @@ static struct sk_buff *codel_dequeue(str
*/
while (vars->dropping &&
codel_time_after_eq(now, vars->drop_next)) {
- if (++vars->count == 0) /* avoid zero divides */
- vars->count = ~0U;
+ vars->count++; /* dont care of possible wrap
+ * since there is no more divide
+ */
+ codel_Newton_step(vars);
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
vars->drop_next =
codel_control_law(vars->drop_next,
params->interval,
- vars->count);
+ vars->rec_inv_sqrt);
goto end;
}
qdisc_drop(skb, sch);
@@ -296,7 +296,7 @@ static struct sk_buff *codel_dequeue(str
vars->drop_next =
codel_control_law(vars->drop_next,
params->interval,
- vars->count);
+ vars->rec_inv_sqrt);
}
}
}
@@ -319,12 +319,18 @@ static struct sk_buff *codel_dequeue(str
if (codel_time_before(now - vars->drop_next,
16 * params->interval)) {
vars->count = (vars->count - vars->lastcount) | 1;
+ /* we dont care if rec_inv_sqrt approximation
+ * is not very precise :
+ * Next Newton steps will correct it quadratically.
+ */
+ codel_Newton_step(vars);
} else {
vars->count = 1;
+ vars->rec_inv_sqrt = 0x7fffffff;
}
vars->lastcount = vars->count;
vars->drop_next = codel_control_law(now, params->interval,
- vars->count);
+ vars->rec_inv_sqrt);
}
end:
return skb;


@@ -1,839 +0,0 @@
From f8cf19c19528a468cc0b9846c0328a94cccdc605 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Fri, 11 May 2012 09:30:50 +0000
Subject: [PATCH] fq_codel: Fair Queue Codel AQM
commit 4b549a2ef4bef9965d97cbd992ba67930cd3e0fe upstream.
Fair Queue Codel packet scheduler
Principles :
- Packets are classified (internal classifier or external) on flows.
- This is a Stochastic model (as we use a hash, several flows might
be hashed on same slot)
- Each flow has a CoDel managed queue.
- Flows are linked onto two (Round Robin) lists,
so that new flows have priority on old ones.
- For a given flow, packets are not reordered (CoDel uses a FIFO)
- head drops only.
- ECN capability is on by default.
- Very low memory footprint (64 bytes per flow)
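A minimal sketch of the stochastic classification above (hypothetical,
simplified from the implementation):

#include <stdint.h>

/* Hash the 5-tuple (jhash in the real code), then reduce the hash into
 * the flow table with a multiply instead of a modulo. Several flows may
 * land in the same slot, hence the stochastic model. */
static uint32_t flow_index(uint32_t hash, uint32_t flows_cnt)
{
	return (uint32_t)(((uint64_t)hash * flows_cnt) >> 32);
}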
tc qdisc ... fq_codel [ limit PACKETS ] [ flows number ]
[ target TIME ] [ interval TIME ] [ noecn ]
[ quantum BYTES ]
defaults : 1024 flows, 10240 packets limit, quantum : device MTU
target : 5ms (CoDel default)
interval : 100ms (CoDel default)
Impressive results on load :
class htb 1:1 root leaf 10: prio 0 quantum 1514 rate 200000Kbit ceil 200000Kbit burst 1475b/8 mpu 0b overhead 0b cburst 1475b/8 mpu 0b overhead 0b level 0
Sent 43304920109 bytes 33063109 pkt (dropped 0, overlimits 0 requeues 0)
rate 201691Kbit 28595pps backlog 0b 312p requeues 0
lended: 33063109 borrowed: 0 giants: 0
tokens: -912 ctokens: -912
class fq_codel 10:1735 parent 10:
(dropped 1292, overlimits 0 requeues 0)
backlog 15140b 10p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:4524 parent 10:
(dropped 1291, overlimits 0 requeues 0)
backlog 16654b 11p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:4e74 parent 10:
(dropped 1290, overlimits 0 requeues 0)
backlog 6056b 4p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 6.4ms dropping drop_next 92.0ms
class fq_codel 10:628a parent 10:
(dropped 1289, overlimits 0 requeues 0)
backlog 7570b 5p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 5.4ms dropping drop_next 90.9ms
class fq_codel 10:a4b3 parent 10:
(dropped 302, overlimits 0 requeues 0)
backlog 16654b 11p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:c3c2 parent 10:
(dropped 1284, overlimits 0 requeues 0)
backlog 13626b 9p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 5.9ms
class fq_codel 10:d331 parent 10:
(dropped 299, overlimits 0 requeues 0)
backlog 15140b 10p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 7.0ms
class fq_codel 10:d526 parent 10:
(dropped 12160, overlimits 0 requeues 0)
backlog 35870b 211p requeues 0
deficit 1508 count 12160 lastcount 1 ldelay 15.3ms dropping drop_next 247us
class fq_codel 10:e2c6 parent 10:
(dropped 1288, overlimits 0 requeues 0)
backlog 15140b 10p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 7.1ms
class fq_codel 10:eab5 parent 10:
(dropped 1285, overlimits 0 requeues 0)
backlog 16654b 11p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 5.9ms
class fq_codel 10:f220 parent 10:
(dropped 1289, overlimits 0 requeues 0)
backlog 15140b 10p requeues 0
deficit 1514 count 1 lastcount 1 ldelay 7.1ms
qdisc htb 1: root refcnt 6 r2q 10 default 1 direct_packets_stat 0 ver 3.17
Sent 43331086547 bytes 33092812 pkt (dropped 0, overlimits 66063544 requeues 71)
rate 201697Kbit 28602pps backlog 0b 260p requeues 71
qdisc fq_codel 10: parent 1:1 limit 10240p flows 65536 target 5.0ms interval 100.0ms ecn
Sent 43331086547 bytes 33092812 pkt (dropped 949359, overlimits 0 requeues 0)
rate 201697Kbit 28602pps backlog 189352b 260p requeues 0
maxpacket 1514 drop_overlimit 0 new_flow_count 5582 ecn_mark 125593
new_flows_len 0 old_flows_len 11
PING 172.30.42.18 (172.30.42.18) 56(84) bytes of data.
64 bytes from 172.30.42.18: icmp_req=1 ttl=64 time=0.227 ms
64 bytes from 172.30.42.18: icmp_req=2 ttl=64 time=0.165 ms
64 bytes from 172.30.42.18: icmp_req=3 ttl=64 time=0.166 ms
64 bytes from 172.30.42.18: icmp_req=4 ttl=64 time=0.151 ms
64 bytes from 172.30.42.18: icmp_req=5 ttl=64 time=0.164 ms
64 bytes from 172.30.42.18: icmp_req=6 ttl=64 time=0.172 ms
64 bytes from 172.30.42.18: icmp_req=7 ttl=64 time=0.175 ms
64 bytes from 172.30.42.18: icmp_req=8 ttl=64 time=0.183 ms
64 bytes from 172.30.42.18: icmp_req=9 ttl=64 time=0.158 ms
64 bytes from 172.30.42.18: icmp_req=10 ttl=64 time=0.200 ms
10 packets transmitted, 10 received, 0% packet loss, time 8999ms
rtt min/avg/max/mdev = 0.151/0.176/0.227/0.022 ms
Much better than SFQ because of the priority given to new flows, and a fast
path that dirties fewer cache lines.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/linux/pkt_sched.h | 54 ++++
net/sched/Kconfig | 11 +
net/sched/Makefile | 1 +
net/sched/sch_fq_codel.c | 624 +++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 690 insertions(+)
create mode 100644 net/sched/sch_fq_codel.c
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -659,4 +659,58 @@ struct tc_codel_xstats {
__u32 dropping; /* are we in dropping state ? */
};
+/* FQ_CODEL */
+
+enum {
+ TCA_FQ_CODEL_UNSPEC,
+ TCA_FQ_CODEL_TARGET,
+ TCA_FQ_CODEL_LIMIT,
+ TCA_FQ_CODEL_INTERVAL,
+ TCA_FQ_CODEL_ECN,
+ TCA_FQ_CODEL_FLOWS,
+ TCA_FQ_CODEL_QUANTUM,
+ __TCA_FQ_CODEL_MAX
+};
+
+#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
+
+enum {
+ TCA_FQ_CODEL_XSTATS_QDISC,
+ TCA_FQ_CODEL_XSTATS_CLASS,
+};
+
+struct tc_fq_codel_qd_stats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 drop_overlimit; /* number of time max qdisc
+ * packet limit was hit
+ */
+ __u32 ecn_mark; /* number of packets we ECN marked
+ * instead of being dropped
+ */
+ __u32 new_flow_count; /* number of time packets
+ * created a 'new flow'
+ */
+ __u32 new_flows_len; /* count of flows in new list */
+ __u32 old_flows_len; /* count of flows in old list */
+};
+
+struct tc_fq_codel_cl_stats {
+ __s32 deficit;
+ __u32 ldelay; /* in-queue delay seen by most recently
+ * dequeued packet
+ */
+ __u32 count;
+ __u32 lastcount;
+ __u32 dropping;
+ __s32 drop_next;
+};
+
+struct tc_fq_codel_xstats {
+ __u32 type;
+ union {
+ struct tc_fq_codel_qd_stats qdisc_stats;
+ struct tc_fq_codel_cl_stats class_stats;
+ };
+};
+
#endif
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -261,6 +261,17 @@ config NET_SCH_CODEL
If unsure, say N.
+config NET_SCH_FQ_CODEL
+ tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)"
+ help
+ Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_fq_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqpr
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
+obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
--- /dev/null
+++ b/net/sched/sch_fq_codel.c
@@ -0,0 +1,624 @@
+/*
+ * Fair Queue CoDel discipline
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/flow_keys.h>
+#include <net/codel.h>
+
+/* Fair Queue CoDel.
+ *
+ * Principles :
+ * Packets are classified (internal classifier or external) on flows.
+ * This is a Stochastic model (as we use a hash, several flows
+ * might be hashed on same slot)
+ * Each flow has a CoDel managed queue.
+ * Flows are linked onto two (Round Robin) lists,
+ * so that new flows have priority on old ones.
+ *
+ * For a given flow, packets are not reordered (CoDel uses a FIFO)
+ * head drops only.
+ * ECN capability is on by default.
+ * Low memory footprint (64 bytes per flow)
+ */
+
+struct fq_codel_flow {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ struct list_head flowchain;
+ int deficit;
+ u32 dropped; /* number of drops (or ECN marks) on this flow */
+ struct codel_vars cvars;
+}; /* please try to keep this structure <= 64 bytes */
+
+struct fq_codel_sched_data {
+ struct tcf_proto *filter_list; /* optional external classifier */
+ struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
+ u32 *backlogs; /* backlog table [flows_cnt] */
+ u32 flows_cnt; /* number of flows */
+ u32 perturbation; /* hash perturbation */
+ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
+ struct codel_params cparams;
+ struct codel_stats cstats;
+ u32 drop_overlimit;
+ u32 new_flow_count;
+
+ struct list_head new_flows; /* list of new flows */
+ struct list_head old_flows; /* list of old flows */
+};
+
+static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
+ const struct sk_buff *skb)
+{
+ struct flow_keys keys;
+ unsigned int hash;
+
+ skb_flow_dissect(skb, &keys);
+ hash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src ^ keys.ip_proto,
+ (__force u32)keys.ports, q->perturbation);
+ return ((u64)hash * q->flows_cnt) >> 32;
+}
+
+static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
+ int *qerr)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tcf_result res;
+ int result;
+
+ if (TC_H_MAJ(skb->priority) == sch->handle &&
+ TC_H_MIN(skb->priority) > 0 &&
+ TC_H_MIN(skb->priority) <= q->flows_cnt)
+ return TC_H_MIN(skb->priority);
+
+ if (!q->filter_list)
+ return fq_codel_hash(q, skb) + 1;
+
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ result = tc_classify(skb, q->filter_list, &res);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ case TC_ACT_SHOT:
+ return 0;
+ }
+#endif
+ if (TC_H_MIN(res.classid) <= q->flows_cnt)
+ return TC_H_MIN(res.classid);
+ }
+ return 0;
+}
+
+/* helper functions : might be changed when/if skb use a standard list_head */
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
+{
+ struct sk_buff *skb = flow->head;
+
+ flow->head = skb->next;
+ skb->next = NULL;
+ return skb;
+}
+
+/* add skb to flow queue (tail add) */
+static inline void flow_queue_add(struct fq_codel_flow *flow,
+ struct sk_buff *skb)
+{
+ if (flow->head == NULL)
+ flow->head = skb;
+ else
+ flow->tail->next = skb;
+ flow->tail = skb;
+ skb->next = NULL;
+}
+
+static unsigned int fq_codel_drop(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ unsigned int maxbacklog = 0, idx = 0, i, len;
+ struct fq_codel_flow *flow;
+
+ /* Queue is full! Find the fat flow and drop packet from it.
+ * This might sound expensive, but with 1024 flows, we scan
+ * 4KB of memory, and we dont need to handle a complex tree
+ * in fast path (packet queue/enqueue) with many cache misses.
+ */
+ for (i = 0; i < q->flows_cnt; i++) {
+ if (q->backlogs[i] > maxbacklog) {
+ maxbacklog = q->backlogs[i];
+ idx = i;
+ }
+ }
+ flow = &q->flows[idx];
+ skb = dequeue_head(flow);
+ len = qdisc_pkt_len(skb);
+ q->backlogs[idx] -= len;
+ kfree_skb(skb);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ flow->dropped++;
+ return idx;
+}
+
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ unsigned int idx;
+ struct fq_codel_flow *flow;
+ int uninitialized_var(ret);
+
+ idx = fq_codel_classify(skb, sch, &ret);
+ if (idx == 0) {
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+ }
+ idx--;
+
+ codel_set_enqueue_time(skb);
+ flow = &q->flows[idx];
+ flow_queue_add(flow, skb);
+ q->backlogs[idx] += qdisc_pkt_len(skb);
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
+ if (list_empty(&flow->flowchain)) {
+ list_add_tail(&flow->flowchain, &q->new_flows);
+ codel_vars_init(&flow->cvars);
+ q->new_flow_count++;
+ flow->deficit = q->quantum;
+ flow->dropped = 0;
+ }
+ if (++sch->q.qlen < sch->limit)
+ return NET_XMIT_SUCCESS;
+
+ q->drop_overlimit++;
+ /* Return Congestion Notification only if we dropped a packet
+ * from this flow.
+ */
+ if (fq_codel_drop(sch) == idx)
+ return NET_XMIT_CN;
+
+ /* As we dropped a packet, better let upper stack know this */
+ qdisc_tree_decrease_qlen(sch, 1);
+ return NET_XMIT_SUCCESS;
+}
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue. Note: backlog is handled in
+ * codel, we dont need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct fq_codel_flow *flow;
+ struct sk_buff *skb = NULL;
+
+ flow = container_of(vars, struct fq_codel_flow, cvars);
+ if (flow->head) {
+ skb = dequeue_head(flow);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ sch->q.qlen--;
+ }
+ return skb;
+}
+
+static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ struct fq_codel_flow *flow;
+ struct list_head *head;
+ u32 prev_drop_count, prev_ecn_mark;
+
+begin:
+ head = &q->new_flows;
+ if (list_empty(head)) {
+ head = &q->old_flows;
+ if (list_empty(head))
+ return NULL;
+ }
+ flow = list_first_entry(head, struct fq_codel_flow, flowchain);
+
+ if (flow->deficit <= 0) {
+ flow->deficit += q->quantum;
+ list_move_tail(&flow->flowchain, &q->old_flows);
+ goto begin;
+ }
+
+ prev_drop_count = q->cstats.drop_count;
+ prev_ecn_mark = q->cstats.ecn_mark;
+
+ skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+ dequeue, &q->backlogs[flow - q->flows]);
+
+ flow->dropped += q->cstats.drop_count - prev_drop_count;
+ flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+ list_move_tail(&flow->flowchain, &q->old_flows);
+ else
+ list_del_init(&flow->flowchain);
+ goto begin;
+ }
+ qdisc_bstats_update(sch, skb);
+ flow->deficit -= qdisc_pkt_len(skb);
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->cstats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ q->cstats.drop_count = 0;
+ }
+ return skb;
+}
+
+static void fq_codel_reset(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = fq_codel_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
+ [TCA_FQ_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_ECN] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 },
+};
+
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
+ if (err < 0)
+ return err;
+ if (tb[TCA_FQ_CODEL_FLOWS]) {
+ if (q->flows)
+ return -EINVAL;
+ q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
+ if (!q->flows_cnt ||
+ q->flows_cnt > 65536)
+ return -EINVAL;
+ }
+ sch_tree_lock(sch);
+
+ if (tb[TCA_FQ_CODEL_TARGET]) {
+ u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
+
+ q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_FQ_CODEL_INTERVAL]) {
+ u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
+
+ q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_FQ_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+
+ if (tb[TCA_FQ_CODEL_ECN])
+ q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+
+ if (tb[TCA_FQ_CODEL_QUANTUM])
+ q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
+
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = fq_codel_dequeue(sch);
+
+ kfree_skb(skb);
+ q->cstats.drop_count++;
+ }
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ q->cstats.drop_count = 0;
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static void *fq_codel_zalloc(size_t sz)
+{
+ void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+ if (!ptr)
+ ptr = vzalloc(sz);
+ return ptr;
+}
+
+static void fq_codel_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static void fq_codel_destroy(struct Qdisc *sch)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ fq_codel_free(q->backlogs);
+ fq_codel_free(q->flows);
+}
+
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ int i;
+
+ sch->limit = 10*1024;
+ q->flows_cnt = 1024;
+ q->quantum = psched_mtu(qdisc_dev(sch));
+ q->perturbation = net_random();
+ INIT_LIST_HEAD(&q->new_flows);
+ INIT_LIST_HEAD(&q->old_flows);
+ codel_params_init(&q->cparams);
+ codel_stats_init(&q->cstats);
+ q->cparams.ecn = true;
+
+ if (opt) {
+ int err = fq_codel_change(sch, opt);
+ if (err)
+ return err;
+ }
+
+ if (!q->flows) {
+ q->flows = fq_codel_zalloc(q->flows_cnt *
+ sizeof(struct fq_codel_flow));
+ if (!q->flows)
+ return -ENOMEM;
+ q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
+ if (!q->backlogs) {
+ fq_codel_free(q->flows);
+ return -ENOMEM;
+ }
+ for (i = 0; i < q->flows_cnt; i++) {
+ struct fq_codel_flow *flow = q->flows + i;
+
+ INIT_LIST_HEAD(&flow->flowchain);
+ }
+ }
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+}
+
+static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
+ codel_time_to_us(q->cparams.target)) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
+ codel_time_to_us(q->cparams.interval)) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_ECN,
+ q->cparams.ecn) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
+ q->quantum) ||
+ nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
+ q->flows_cnt))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, opts);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tc_fq_codel_xstats st = {
+ .type = TCA_FQ_CODEL_XSTATS_QDISC,
+ .qdisc_stats.maxpacket = q->cstats.maxpacket,
+ .qdisc_stats.drop_overlimit = q->drop_overlimit,
+ .qdisc_stats.ecn_mark = q->cstats.ecn_mark,
+ .qdisc_stats.new_flow_count = q->new_flow_count,
+ };
+ struct list_head *pos;
+
+ list_for_each(pos, &q->new_flows)
+ st.qdisc_stats.new_flows_len++;
+
+ list_for_each(pos, &q->old_flows)
+ st.qdisc_stats.old_flows_len++;
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ /* we cannot bypass queue discipline anymore */
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+}
+
+static void fq_codel_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return &q->filter_list;
+}
+
+static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ u32 idx = cl - 1;
+ struct gnet_stats_queue qs = { 0 };
+ struct tc_fq_codel_xstats xstats;
+
+ if (idx < q->flows_cnt) {
+ const struct fq_codel_flow *flow = &q->flows[idx];
+ const struct sk_buff *skb = flow->head;
+
+ memset(&xstats, 0, sizeof(xstats));
+ xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
+ xstats.class_stats.deficit = flow->deficit;
+ xstats.class_stats.ldelay =
+ codel_time_to_us(flow->cvars.ldelay);
+ xstats.class_stats.count = flow->cvars.count;
+ xstats.class_stats.lastcount = flow->cvars.lastcount;
+ xstats.class_stats.dropping = flow->cvars.dropping;
+ if (flow->cvars.dropping) {
+ codel_tdiff_t delta = flow->cvars.drop_next -
+ codel_get_time();
+
+ xstats.class_stats.drop_next = (delta >= 0) ?
+ codel_time_to_us(delta) :
+ -codel_time_to_us(-delta);
+ }
+ while (skb) {
+ qs.qlen++;
+ skb = skb->next;
+ }
+ qs.backlog = q->backlogs[idx];
+ qs.drops = flow->dropped;
+ }
+ if (gnet_stats_copy_queue(d, &qs) < 0)
+ return -1;
+ if (idx < q->flows_cnt)
+ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+ return 0;
+}
+
+static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ unsigned int i;
+
+ if (arg->stop)
+ return;
+
+ for (i = 0; i < q->flows_cnt; i++) {
+ if (list_empty(&q->flows[i].flowchain) ||
+ arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, i + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops fq_codel_class_ops = {
+ .leaf = fq_codel_leaf,
+ .get = fq_codel_get,
+ .put = fq_codel_put,
+ .tcf_chain = fq_codel_find_tcf,
+ .bind_tcf = fq_codel_bind,
+ .unbind_tcf = fq_codel_put,
+ .dump = fq_codel_dump_class,
+ .dump_stats = fq_codel_dump_class_stats,
+ .walk = fq_codel_walk,
+};
+
+static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
+ .cl_ops = &fq_codel_class_ops,
+ .id = "fq_codel",
+ .priv_size = sizeof(struct fq_codel_sched_data),
+ .enqueue = fq_codel_enqueue,
+ .dequeue = fq_codel_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .drop = fq_codel_drop,
+ .init = fq_codel_init,
+ .reset = fq_codel_reset,
+ .destroy = fq_codel_destroy,
+ .change = fq_codel_change,
+ .dump = fq_codel_dump,
+ .dump_stats = fq_codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init fq_codel_module_init(void)
+{
+ return register_qdisc(&fq_codel_qdisc_ops);
+}
+
+static void __exit fq_codel_module_exit(void)
+{
+ unregister_qdisc(&fq_codel_qdisc_ops);
+}
+
+module_init(fq_codel_module_init)
+module_exit(fq_codel_module_exit)
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
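
Editor's aside: the bucket selection in fq_codel_hash() above maps a 32-bit
hash onto [0, flows_cnt) with a multiply and shift instead of a modulo,
avoiding a hardware divide on the enqueue fast path. A minimal userspace
sketch of that trick (a toy mixer stands in for jhash_3words(); every name
below is a hypothetical stand-in, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t toy_hash(uint32_t src, uint32_t dst, uint32_t ports,
			 uint32_t perturbation)
{
	/* any decent 32-bit mixer works; the kernel uses jhash_3words() */
	uint32_t h = src * 2654435761u;

	h ^= dst + 0x9e3779b9u + (h << 6) + (h >> 2);
	h ^= ports + 0x9e3779b9u + (h << 6) + (h >> 2);
	return h ^ perturbation;
}

static unsigned int pick_flow(uint32_t hash, uint32_t flows_cnt)
{
	/* ((u64)hash * flows_cnt) >> 32 scales the full 32-bit hash
	 * range evenly onto flows_cnt slots, no division needed */
	return (unsigned int)(((uint64_t)hash * flows_cnt) >> 32);
}

int main(void)
{
	uint32_t h = toy_hash(0x0a000001, 0x0a000002,
			      (12345u << 16) | 80, 42);

	printf("flow bucket: %u of 1024\n", pick_flow(h, 1024));
	return 0;
}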


@@ -1,33 +0,0 @@
From 18c12e496ead3306de00a82d0bc73d71b34d7c24 Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven <geert@linux-m68k.org>
Date: Mon, 14 May 2012 09:47:05 +0000
Subject: [PATCH] net/codel: Add missing #include <linux/prefetch.h>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
commit ce5b4b977127ee20c3f9c3fd3637cd3796f649f5 upstream.
m68k allmodconfig:
net/sched/sch_codel.c: In function dequeue:
net/sched/sch_codel.c:70: error: implicit declaration of function prefetch
make[1]: *** [net/sched/sch_codel.o] Error 1
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/sched/sch_codel.c | 1 +
1 file changed, 1 insertion(+)
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -46,6 +46,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>


@@ -1,51 +0,0 @@
From b49ab5f6bb7e609190065cb9a605de809e50ab60 Mon Sep 17 00:00:00 2001
From: Sasha Levin <levinsasha928@gmail.com>
Date: Mon, 14 May 2012 11:57:06 +0000
Subject: [PATCH] net: codel: fix build errors
commit 669d67bf777def468970f2dcba1537edf3b2d329 upstream.
Fix the following build error:
net/sched/sch_fq_codel.c: In function 'fq_codel_dump_stats':
net/sched/sch_fq_codel.c:464:3: error: unknown field 'qdisc_stats' specified in initializer
net/sched/sch_fq_codel.c:464:3: warning: missing braces around initializer
net/sched/sch_fq_codel.c:464:3: warning: (near initialization for 'st.<anonymous>')
net/sched/sch_fq_codel.c:465:3: error: unknown field 'qdisc_stats' specified in initializer
net/sched/sch_fq_codel.c:465:3: warning: excess elements in struct initializer
net/sched/sch_fq_codel.c:465:3: warning: (near initialization for 'st')
net/sched/sch_fq_codel.c:466:3: error: unknown field 'qdisc_stats' specified in initializer
net/sched/sch_fq_codel.c:466:3: warning: excess elements in struct initializer
net/sched/sch_fq_codel.c:466:3: warning: (near initialization for 'st')
net/sched/sch_fq_codel.c:467:3: error: unknown field 'qdisc_stats' specified in initializer
net/sched/sch_fq_codel.c:467:3: warning: excess elements in struct initializer
net/sched/sch_fq_codel.c:467:3: warning: (near initialization for 'st')
make[1]: *** [net/sched/sch_fq_codel.o] Error 1
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/sched/sch_fq_codel.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -461,13 +461,14 @@ static int fq_codel_dump_stats(struct Qd
struct fq_codel_sched_data *q = qdisc_priv(sch);
struct tc_fq_codel_xstats st = {
.type = TCA_FQ_CODEL_XSTATS_QDISC,
- .qdisc_stats.maxpacket = q->cstats.maxpacket,
- .qdisc_stats.drop_overlimit = q->drop_overlimit,
- .qdisc_stats.ecn_mark = q->cstats.ecn_mark,
- .qdisc_stats.new_flow_count = q->new_flow_count,
};
struct list_head *pos;
+ st.qdisc_stats.maxpacket = q->cstats.maxpacket;
+ st.qdisc_stats.drop_overlimit = q->drop_overlimit;
+ st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
+ st.qdisc_stats.new_flow_count = q->new_flow_count;
+
list_for_each(pos, &q->new_flows)
st.qdisc_stats.new_flows_len++;
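
Editor's aside: the failure this fixes is a C corner case, not a codel bug:
the GCC versions targeted here reject designated initializers that reach
through an anonymous union member. A self-contained illustration (struct
names are hypothetical stand-ins; anonymous unions need C11 or GNU C):

#include <stdio.h>

struct toy_xstats {
	unsigned int type;
	union {
		struct { unsigned int maxpacket; } qdisc_stats;
		struct { int deficit; } class_stats;
	};
};

int main(void)
{
	/* struct toy_xstats st = { .qdisc_stats.maxpacket = 9000 };
	 *   -> "unknown field 'qdisc_stats' specified in initializer"
	 *      on the compilers this patch works around */
	struct toy_xstats st = { .type = 0 };	/* zero-inits the union */

	st.qdisc_stats.maxpacket = 9000;	/* portable: plain assignment */
	printf("maxpacket=%u\n", st.qdisc_stats.maxpacket);
	return 0;
}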


@@ -1,86 +0,0 @@
From 03333931c17d9c62ba4063d4e4fec1578c0729a7 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Sat, 12 May 2012 21:23:23 +0000
Subject: [PATCH] codel: use u16 field instead of 31bits for rec_inv_sqrt
commit 6ff272c9ad65eda219cd975b9da2dbc31cc812ee upstream.
David pointed out that gcc might generate poor code with 31-bit fields.
Using u16 is more than enough and permits better code output.
Also make the code's intent more readable by using constants, since fixed
point arithmetic is not trivial for everybody.
Suggested-by: David Miller <davem@davemloft.net>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/codel.h | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -133,13 +133,17 @@ struct codel_params {
struct codel_vars {
u32 count;
u32 lastcount;
- bool dropping:1;
- u32 rec_inv_sqrt:31;
+ bool dropping;
+ u16 rec_inv_sqrt;
codel_time_t first_above_time;
codel_time_t drop_next;
codel_time_t ldelay;
};
+#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
+/* needed shift to get a Q0.32 number from rec_inv_sqrt */
+#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)
+
/**
* struct codel_stats - contains codel shared variables and stats
* @maxpacket: largest packet we've seen so far
@@ -173,17 +177,18 @@ static void codel_stats_init(struct code
* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
*
- * Here, invsqrt is a fixed point number (< 1.0), 31bit mantissa)
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
*/
static void codel_Newton_step(struct codel_vars *vars)
{
- u32 invsqrt = vars->rec_inv_sqrt;
- u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 31;
- u64 val = (3LL << 31) - ((u64)vars->count * invsqrt2);
+ u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+ u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
- val = (val * invsqrt) >> 32;
+ val >>= 2; /* avoid overflow in following multiply */
+ val = (val * invsqrt) >> (32 - 2 + 1);
- vars->rec_inv_sqrt = val;
+ vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}
/*
@@ -195,7 +200,7 @@ static codel_time_t codel_control_law(co
codel_time_t interval,
u32 rec_inv_sqrt)
{
- return t + reciprocal_divide(interval, rec_inv_sqrt << 1);
+ return t + reciprocal_divide(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}
@@ -326,7 +331,7 @@ static struct sk_buff *codel_dequeue(str
codel_Newton_step(vars);
} else {
vars->count = 1;
- vars->rec_inv_sqrt = 0x7fffffff;
+ vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
}
vars->lastcount = vars->count;
vars->drop_next = codel_control_law(now, params->interval,
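
Editor's aside: the fixed point arithmetic above is compact but dense. A
userspace re-derivation of the same Newton step (a hypothetical toy, not
kernel code) shows it converging toward 1/sqrt(count) when stepped once per
count increment, exactly as codel does while in the dropping state:

#include <stdint.h>
#include <stdio.h>

#define REC_INV_SQRT_BITS	(8 * sizeof(uint16_t))
#define REC_INV_SQRT_SHIFT	(32 - REC_INV_SQRT_BITS)

/* one step of new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2),
 * with invsqrt held as Q0.16 and widened to Q0.32 for the math */
static void newton_step(uint16_t *rec_inv_sqrt, uint32_t count)
{
	uint32_t invsqrt = ((uint32_t)*rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
	uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;
	uint64_t val = (3ULL << 32) - ((uint64_t)count * invsqrt2);

	val >>= 2;	/* avoid overflow in the following multiply */
	val = (val * invsqrt) >> (32 - 2 + 1);
	*rec_inv_sqrt = (uint16_t)(val >> REC_INV_SQRT_SHIFT);
}

int main(void)
{
	uint16_t x = 0xffff;	/* ~0U >> REC_INV_SQRT_SHIFT: ~1.0 in Q0.16 */
	uint32_t count;

	for (count = 2; count <= 9; count++)
		newton_step(&x, count);
	printf("fixed point: %.4f  exact 1/sqrt(9): %.4f\n",
	       x / 65536.0, 1.0 / 3.0);
	return 0;
}

Starting from ~1.0 and stepping per increment tracks the true value closely
(the run above ends at about 0.3318 against an exact 0.3333), which is why
the commit can afford a 16-bit representation.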


@@ -1,132 +0,0 @@
From 7bd90773f89001ea4960ed47676b550137f3facb Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Wed, 16 May 2012 04:39:09 +0000
Subject: [PATCH] fq_codel: should use qdisc backlog as threshold
commit 865ec5523dadbedefbc5710a68969f686a28d928 upstream.
The codel_should_drop() logic allows a packet to escape dropping if the
queue size is under the max packet size.
In fq_codel, we have two possible backlogs: the qdisc global one, and
the flow local one.
The meaningful one for codel_should_drop() should be the global backlog,
not the per-flow one, so that thin flows can have a non-zero drop/mark
probability.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/codel.h | 15 +++++++--------
net/sched/sch_codel.c | 4 ++--
net/sched/sch_fq_codel.c | 5 +++--
3 files changed, 12 insertions(+), 12 deletions(-)
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(co
static bool codel_should_drop(const struct sk_buff *skb,
- unsigned int *backlog,
+ struct Qdisc *sch,
struct codel_vars *vars,
struct codel_params *params,
struct codel_stats *stats,
@@ -219,13 +219,13 @@ static bool codel_should_drop(const stru
}
vars->ldelay = now - codel_get_enqueue_time(skb);
- *backlog -= qdisc_pkt_len(skb);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
stats->maxpacket = qdisc_pkt_len(skb);
if (codel_time_before(vars->ldelay, params->target) ||
- *backlog <= stats->maxpacket) {
+ sch->qstats.backlog <= stats->maxpacket) {
/* went below - stay below for at least interval */
vars->first_above_time = 0;
return false;
@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(str
struct codel_params *params,
struct codel_vars *vars,
struct codel_stats *stats,
- codel_skb_dequeue_t dequeue_func,
- u32 *backlog)
+ codel_skb_dequeue_t dequeue_func)
{
struct sk_buff *skb = dequeue_func(vars, sch);
codel_time_t now;
@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(str
return skb;
}
now = codel_get_time();
- drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+ drop = codel_should_drop(skb, sch, vars, params, stats, now);
if (vars->dropping) {
if (!drop) {
/* sojourn time below target - leave dropping state */
@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(str
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
- if (!codel_should_drop(skb, backlog,
+ if (!codel_should_drop(skb, sch,
vars, params, stats, now)) {
/* leave dropping state */
vars->dropping = false;
@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(str
stats->drop_count++;
skb = dequeue_func(vars, sch);
- drop = codel_should_drop(skb, backlog, vars, params,
+ drop = codel_should_drop(skb, sch, vars, params,
stats, now);
}
vars->dropping = true;
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_deque
struct codel_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
- dequeue, &sch->qstats.backlog);
+ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_bu
*/
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
struct fq_codel_flow *flow;
struct sk_buff *skb = NULL;
flow = container_of(vars, struct fq_codel_flow, cvars);
if (flow->head) {
skb = dequeue_head(flow);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
sch->q.qlen--;
}
return skb;
@@ -256,7 +257,7 @@ begin:
prev_ecn_mark = q->cstats.ecn_mark;
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
- dequeue, &q->backlogs[flow - q->flows]);
+ dequeue);
flow->dropped += q->cstats.drop_count - prev_drop_count;
flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
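
Editor's aside: the effect of this change is easier to see in isolation. A
toy model of the drop decision (hypothetical names, timestamp wrap-around
ignored), where the backlog is now measured across the whole qdisc rather
than across one flow:

#include <stdbool.h>
#include <stdint.h>

struct toy_vars {
	uint32_t first_above_time;	/* 0 means "not armed" */
};

static bool should_drop(uint32_t sojourn_us, uint32_t qdisc_backlog,
			uint32_t maxpacket, uint32_t target_us,
			uint32_t interval_us, uint32_t now_us,
			struct toy_vars *v)
{
	if (sojourn_us < target_us || qdisc_backlog <= maxpacket) {
		v->first_above_time = 0;	/* went below: reset */
		return false;
	}
	if (v->first_above_time == 0) {
		/* first excursion above target: wait one interval */
		v->first_above_time = now_us + interval_us;
		return false;
	}
	return now_us >= v->first_above_time;
}

With a per-flow backlog, a thin flow inside a busy fq_codel instance would
almost always take the "qdisc_backlog <= maxpacket" escape; measuring the
global backlog restores a non-zero drop/mark probability for such flows.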


@@ -1,603 +0,0 @@
commit ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0
Author: Linus Walleij <linus.walleij@linaro.org>
Date: Wed Feb 22 10:05:38 2012 +0100
spi: create a message queueing infrastructure
This rips the message queue in the PL022 driver out and pushes
it into (optional) common infrastructure. Drivers that want to
use the message pumping thread will need to define the new
per-message transfer methods and leave the deprecated transfer()
method as NULL.
Most of the design is described in the documentation changes that
are included in this patch.
Since there is a queue that needs to be stopped when the system
is suspending/resuming, two new calls are implemented for the
device drivers to call in their suspend()/resume() functions:
spi_master_suspend() and spi_master_resume().
ChangeLog v1->v2:
- Remove Kconfig entry and do not make the queue support optional
at all, instead be more aggressive and have it as part of the
compulsory infrastructure.
- If the .transfer() method is implemented, print a small
deprecation notice and do not start the transfer pump.
- Fix a bitrotted comment.
ChangeLog v2->v3:
- Fix up a problematic sequence courtesy of Chris Blair.
- Stop rather than destroy the queue on suspend() courtesy of
Chris Blair.
Signed-off-by: Chris Blair <chris.blair@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
[Florian: dropped the changes on drivers/spi/spi-pl022.c, removed
the dev_info() about unqueued drivers still using the master function]
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -1,7 +1,7 @@
Overview of Linux kernel SPI support
====================================
-21-May-2007
+02-Feb-2012
What is SPI?
------------
@@ -483,9 +483,9 @@ also initialize its own internal state.
and those methods.)
After you initialize the spi_master, then use spi_register_master() to
-publish it to the rest of the system. At that time, device nodes for
-the controller and any predeclared spi devices will be made available,
-and the driver model core will take care of binding them to drivers.
+publish it to the rest of the system. At that time, device nodes for the
+controller and any predeclared spi devices will be made available, and
+the driver model core will take care of binding them to drivers.
If you need to remove your SPI controller driver, spi_unregister_master()
will reverse the effect of spi_register_master().
@@ -521,21 +521,53 @@ SPI MASTER METHODS
** When you code setup(), ASSUME that the controller
** is actively processing transfers for another device.
- master->transfer(struct spi_device *spi, struct spi_message *message)
- This must not sleep. Its responsibility is arrange that the
- transfer happens and its complete() callback is issued. The two
- will normally happen later, after other transfers complete, and
- if the controller is idle it will need to be kickstarted.
-
master->cleanup(struct spi_device *spi)
Your controller driver may use spi_device.controller_state to hold
state it dynamically associates with that device. If you do that,
be sure to provide the cleanup() method to free that state.
+ master->prepare_transfer_hardware(struct spi_master *master)
+ This will be called by the queue mechanism to signal to the driver
+ that a message is coming in soon, so the subsystem requests the
+ driver to prepare the transfer hardware by issuing this call.
+ This may sleep.
+
+ master->unprepare_transfer_hardware(struct spi_master *master)
+ This will be called by the queue mechanism to signal to the driver
+ that there are no more messages pending in the queue and it may
+ relax the hardware (e.g. by power management calls). This may sleep.
+
+ master->transfer_one_message(struct spi_master *master,
+ struct spi_message *mesg)
+ The subsystem calls the driver to transfer a single message while
+ queuing transfers that arrive in the meantime. When the driver is
+ finished with this message, it must call
+ spi_finalize_current_message() so the subsystem can issue the next
+ transfer. This may sleep.
+
+ DEPRECATED METHODS
+
+ master->transfer(struct spi_device *spi, struct spi_message *message)
+ This must not sleep. Its responsibility is arrange that the
+ transfer happens and its complete() callback is issued. The two
+ will normally happen later, after other transfers complete, and
+ if the controller is idle it will need to be kickstarted. This
+ method is not used on queued controllers and must be NULL if
+ transfer_one_message() and (un)prepare_transfer_hardware() are
+ implemented.
+
SPI MESSAGE QUEUE
-The bulk of the driver will be managing the I/O queue fed by transfer().
+If you are happy with the standard queueing mechanism provided by the
+SPI subsystem, just implement the queued methods specified above. Using
+the message queue has the upside of centralizing a lot of code and
+providing pure process-context execution of methods. The message queue
+can also be elevated to realtime priority on high-priority SPI traffic.
+
+Unless the queueing mechanism in the SPI subsystem is selected, the bulk
+of the driver will be managing the I/O queue fed by the now deprecated
+function transfer().
That queue could be purely conceptual. For example, a driver used only
for low-frequency sensor access might be fine using synchronous PIO.
@@ -561,4 +593,6 @@ Stephen Street
Mark Underwood
Andrew Victor
Vitaly Wool
-
+Grant Likely
+Mark Brown
+Linus Walleij
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,6 +30,9 @@
#include <linux/of_spi.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
static void spidev_release(struct device *dev)
{
@@ -507,6 +510,293 @@ spi_register_board_info(struct spi_board
/*-------------------------------------------------------------------------*/
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and if so call out to the driver to initialize hardware
+ * and transfer each message.
+ *
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+ struct spi_master *master =
+ container_of(work, struct spi_master, pump_messages);
+ unsigned long flags;
+ bool was_busy = false;
+ int ret;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&master->queue_lock, flags);
+ if (list_empty(&master->queue) || !master->running) {
+ if (master->busy) {
+ ret = master->unprepare_transfer_hardware(master);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to unprepare transfer hardware\n");
+ return;
+ }
+ }
+ master->busy = false;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (master->cur_msg) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+ /* Extract head of queue */
+ master->cur_msg =
+ list_entry(master->queue.next, struct spi_message, queue);
+
+ list_del_init(&master->cur_msg->queue);
+ if (master->busy)
+ was_busy = true;
+ else
+ master->busy = true;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ if (!was_busy) {
+ ret = master->prepare_transfer_hardware(master);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to prepare transfer hardware\n");
+ return;
+ }
+ }
+
+ ret = master->transfer_one_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to transfer one message from queue\n");
+ return;
+ }
+}
+
+static int spi_init_queue(struct spi_master *master)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+ INIT_LIST_HEAD(&master->queue);
+ spin_lock_init(&master->queue_lock);
+
+ master->running = false;
+ master->busy = false;
+
+ init_kthread_worker(&master->kworker);
+ master->kworker_task = kthread_run(kthread_worker_fn,
+ &master->kworker,
+ dev_name(&master->dev));
+ if (IS_ERR(master->kworker_task)) {
+ dev_err(&master->dev, "failed to create message pump task\n");
+ return -ENOMEM;
+ }
+ init_kthread_work(&master->pump_messages, spi_pump_messages);
+
+ /*
+ * Master config will indicate if this controller should run the
+ * message pump with high (realtime) priority to reduce the transfer
+ * latency on the bus by minimising the delay between a transfer
+ * request and the scheduling of the message pump thread. Without this
+ * setting the message pump thread will remain at default priority.
+ */
+ if (master->rt) {
+ dev_info(&master->dev,
+ "will run message pump with realtime priority\n");
+ sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+ }
+
+ return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @master: the master to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+{
+ struct spi_message *next;
+ unsigned long flags;
+
+ /* get a pointer to the next message, if any */
+ spin_lock_irqsave(&master->queue_lock, flags);
+ if (list_empty(&master->queue))
+ next = NULL;
+ else
+ next = list_entry(master->queue.next,
+ struct spi_message, queue);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @master: the master to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_master *master)
+{
+ struct spi_message *mesg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+ mesg = master->cur_msg;
+ master->cur_msg = NULL;
+
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ mesg->state = NULL;
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_master *master)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ if (master->running || master->busy) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return -EBUSY;
+ }
+
+ master->running = true;
+ master->cur_msg = NULL;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+
+ return 0;
+}
+
+static int spi_stop_queue(struct spi_master *master)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int ret = 0;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the master->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead.
+ */
+ while ((!list_empty(&master->queue) || master->busy) && limit--) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&master->queue_lock, flags);
+ }
+
+ if (!list_empty(&master->queue) || master->busy)
+ ret = -EBUSY;
+ else
+ master->running = false;
+
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ if (ret) {
+ dev_warn(&master->dev,
+ "could not stop message queue\n");
+ return ret;
+ }
+ return ret;
+}
+
+static int spi_destroy_queue(struct spi_master *master)
+{
+ int ret;
+
+ ret = spi_stop_queue(master);
+
+ /*
+ * flush_kthread_worker will block until all work is done.
+ * If the reason that stop_queue timed out is that the work will never
+ * finish, then it does no good to call flush/stop thread, so
+ * return anyway.
+ */
+ if (ret) {
+ dev_err(&master->dev, "problem destroying queue\n");
+ return ret;
+ }
+
+ flush_kthread_worker(&master->kworker);
+ kthread_stop(master->kworker_task);
+
+ return 0;
+}
+
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to handled is queued to driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct spi_master *master = spi->master;
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ if (!master->running) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+
+ list_add_tail(&msg->queue, &master->queue);
+ if (master->running && !master->busy)
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return 0;
+}
+
+static int spi_master_initialize_queue(struct spi_master *master)
+{
+ int ret;
+
+ master->queued = true;
+ master->transfer = spi_queued_transfer;
+
+ /* Initialize and start queue */
+ ret = spi_init_queue(master);
+ if (ret) {
+ dev_err(&master->dev, "problem initializing queue\n");
+ goto err_init_queue;
+ }
+ ret = spi_start_queue(master);
+ if (ret) {
+ dev_err(&master->dev, "problem starting queue\n");
+ goto err_start_queue;
+ }
+
+ return 0;
+
+err_start_queue:
+err_init_queue:
+ spi_destroy_queue(master);
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
static void spi_master_release(struct device *dev)
{
struct spi_master *master;
@@ -522,6 +812,7 @@ static struct class spi_master_class = {
};
+
/**
* spi_alloc_master - allocate SPI master controller
* @dev: the controller, possibly using the platform_bus
@@ -621,6 +912,15 @@ int spi_register_master(struct spi_maste
dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
dynamic ? " (dynamic)" : "");
+ /* If we're using a queued driver, start the queue */
+ if (!master->transfer) {
+ status = spi_master_initialize_queue(master);
+ if (status) {
+ device_unregister(&master->dev);
+ goto done;
+ }
+ }
+
mutex_lock(&board_lock);
list_add_tail(&master->list, &spi_master_list);
list_for_each_entry(bi, &board_list, list)
@@ -636,7 +936,6 @@ done:
}
EXPORT_SYMBOL_GPL(spi_register_master);
-
static int __unregister(struct device *dev, void *null)
{
spi_unregister_device(to_spi_device(dev));
@@ -657,6 +956,11 @@ void spi_unregister_master(struct spi_ma
{
int dummy;
+ if (master->queued) {
+ if (spi_destroy_queue(master))
+ dev_err(&master->dev, "queue remove failed\n");
+ }
+
mutex_lock(&board_lock);
list_del(&master->list);
mutex_unlock(&board_lock);
@@ -666,6 +970,37 @@ void spi_unregister_master(struct spi_ma
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
+int spi_master_suspend(struct spi_master *master)
+{
+ int ret;
+
+ /* Basically no-ops for non-queued masters */
+ if (!master->queued)
+ return 0;
+
+ ret = spi_stop_queue(master);
+ if (ret)
+ dev_err(&master->dev, "queue stop failed\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_suspend);
+
+int spi_master_resume(struct spi_master *master)
+{
+ int ret;
+
+ if (!master->queued)
+ return 0;
+
+ ret = spi_start_queue(master);
+ if (ret)
+ dev_err(&master->dev, "queue restart failed\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_resume);
+
static int __spi_master_match(struct device *dev, void *data)
{
struct spi_master *m;
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
+#include <linux/kthread.h>
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -235,6 +236,27 @@ static inline void spi_unregister_driver
* the device whose settings are being modified.
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
+ * @queued: whether this master is providing an internal message queue
+ * @kworker: thread struct for message pump
+ * @kworker_task: pointer to task for message pump kworker thread
+ * @pump_messages: work struct for scheduling work to the message pump
+ * @queue_lock: spinlock to syncronise access to message queue
+ * @queue: message queue
+ * @cur_msg: the currently in-flight message
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_transfer_hardware: a message will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the transfer hardware
+ * by issuing this call
+ * @transfer_one_message: the subsystem calls the driver to transfer a single
+ * message while queuing transfers that arrive in the meantime. When the
+ * driver is finished with this message, it must call
+ * spi_finalize_current_message() so the subsystem can issue the next
+ * transfer
+ * @prepare_transfer_hardware: there are currently no more messages on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
*
* Each SPI master controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -318,6 +340,28 @@ struct spi_master {
/* called on release() to free memory provided by spi_master */
void (*cleanup)(struct spi_device *spi);
+
+ /*
+ * These hooks are for drivers that want to use the generic
+ * master transfer queueing mechanism. If these are used, the
+ * transfer() function above must NOT be specified by the driver.
+ * Over time we expect SPI drivers to be phased over to this API.
+ */
+ bool queued;
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+ struct kthread_work pump_messages;
+ spinlock_t queue_lock;
+ struct list_head queue;
+ struct spi_message *cur_msg;
+ bool busy;
+ bool running;
+ bool rt;
+
+ int (*prepare_transfer_hardware)(struct spi_master *master);
+ int (*transfer_one_message)(struct spi_master *master,
+ struct spi_message *mesg);
+ int (*unprepare_transfer_hardware)(struct spi_master *master);
};
static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -343,6 +387,13 @@ static inline void spi_master_put(struct
put_device(&master->dev);
}
+/* PM calls that need to be issued by the driver */
+extern int spi_master_suspend(struct spi_master *master);
+extern int spi_master_resume(struct spi_master *master);
+
+/* Calls the driver make to interact with the message queue */
+extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
+extern void spi_finalize_current_message(struct spi_master *master);
/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
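
Editor's aside: from a controller driver's point of view, adopting the
queue boils down to three callbacks. A compile-level sketch (kernel context
assumed; the "foo" names are hypothetical and this is not a real driver):

static int foo_prepare_hw(struct spi_master *master)
{
	/* e.g. enable clocks / power up the block; may sleep */
	return 0;
}

static int foo_unprepare_hw(struct spi_master *master)
{
	/* e.g. disable clocks once the queue has drained; may sleep */
	return 0;
}

static int foo_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *t;

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		/* push t->tx_buf / fill t->rx_buf on the wire here */
		msg->actual_length += t->len;
	}
	msg->status = 0;
	spi_finalize_current_message(master);	/* lets the pump move on */
	return 0;
}

/* in probe():
 *	master->prepare_transfer_hardware = foo_prepare_hw;
 *	master->transfer_one_message = foo_transfer_one_message;
 *	master->unprepare_transfer_hardware = foo_unprepare_hw;
 *	master->transfer = NULL;	(queued mode, see above)
 */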


@@ -1,39 +0,0 @@
From 7dfd2bd70228d1f8d468d58cb3d12ecd618479ed Mon Sep 17 00:00:00 2001
From: Shubhrajyoti D <shubhrajyoti@ti.com>
Date: Thu, 10 May 2012 19:20:41 +0530
Subject: [PATCH] spi: Dont call prepare/unprepare transfer if not populated
Currently the prepare/unprepare transfer methods are called unconditionally.
The assumption is that every driver using the spi core queue infrastructure
has to populate the prepare and unprepare functions. This encourages
drivers to populate empty functions to prevent crashing.
This patch prevents the call to prepare/unprepare if not populated.
Signed-off-by: Shubhrajyoti D <shubhrajyoti@ti.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
[grant.likely: fix whitespace defect]
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
---
drivers/spi/spi.c | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -530,7 +530,7 @@ static void spi_pump_messages(struct kth
/* Lock queue and check for queue work */
spin_lock_irqsave(&master->queue_lock, flags);
if (list_empty(&master->queue) || !master->running) {
- if (master->busy) {
+ if (master->busy && master->unprepare_transfer_hardware) {
ret = master->unprepare_transfer_hardware(master);
if (ret) {
dev_err(&master->dev,
@@ -559,7 +559,7 @@ static void spi_pump_messages(struct kth
master->busy = true;
spin_unlock_irqrestore(&master->queue_lock, flags);
- if (!was_busy) {
+ if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
if (ret) {
dev_err(&master->dev,


@@ -1,52 +0,0 @@
From b8fc328668a74e1314a19266755a54abd875e5a6 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Sun, 29 Jul 2012 20:52:21 +0000
Subject: [PATCH] codel: refine one condition to avoid a nul rec_inv_sqrt
commit 2359a47671fc4fb0fe5e9945f76c2cb10792c0f8 upstream.
One condition before codel_Newton_step() was not good if
we never left the dropping state for a flow. As a result
rec_inv_sqrt was 0, instead of the ~0 initial value.
The codel control law was then set to a very aggressive mode, dropping
many packets before reaching 'target' and recovering from this problem.
To keep codel_vars_init() as efficient as possible, refine
the condition to make sure the rec_inv_sqrt initial value is correct.
Many thanks to Anton Mich for discovering the issue and suggesting
a fix.
Reported-by: Anton Mich <lp2s1h@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/codel.h | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(str
}
}
} else if (drop) {
+ u32 delta;
+
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
@@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(str
* assume that the drop rate that controlled the queue on the
* last cycle is a good starting point to control it now.
*/
- if (codel_time_before(now - vars->drop_next,
+ delta = vars->count - vars->lastcount;
+ if (delta > 1 &&
+ codel_time_before(now - vars->drop_next,
16 * params->interval)) {
- vars->count = (vars->count - vars->lastcount) | 1;
+ vars->count = delta;
/* we dont care if rec_inv_sqrt approximation
* is not very precise :
* Next Newton steps will correct it quadratically.
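
Editor's aside: the guard can be read as "only trust history if the last
cycle actually dropped more than one packet". A standalone restatement
(hypothetical toy, not kernel code):

#include <stdbool.h>
#include <stdint.h>

static void reenter_dropping(uint32_t *count, uint16_t *rec_inv_sqrt,
			     uint32_t lastcount, bool dropped_recently)
{
	uint32_t delta = *count - lastcount;

	if (delta > 1 && dropped_recently) {
		*count = delta;	/* resume near the previous drop rate */
		/* rec_inv_sqrt keeps its old value; the next Newton
		 * steps refine it quadratically */
	} else {
		*count = 1;
		*rec_inv_sqrt = 0xffff;	/* ~0U >> REC_INV_SQRT_SHIFT */
	}
}

With the old "(count - lastcount) | 1" form, a flow that kept re-entering
the dropping state could stay in the history branch forever with
rec_inv_sqrt still at its zeroed codel_vars_init() value, giving the overly
aggressive control law described in the commit message.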


@@ -1,783 +0,0 @@
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,21 +125,26 @@
* The current exported interfaces for gathering environmental noise
* from the devices are:
*
+ * void add_device_randomness(const void *buf, unsigned int size);
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
- * void add_interrupt_randomness(int irq);
+ * void add_interrupt_randomness(int irq, int irq_flags);
* void add_disk_randomness(struct gendisk *disk);
*
* add_input_randomness() uses the input layer interrupt timing, as well as
* the event type information from the hardware.
*
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool. Note that not all interrupts are good
- * sources of randomness! For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
+ *
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
*
* add_disk_randomness() uses what amounts to the seek time of block
* layer request events, on a per-disk_devt basis, as input to the
@@ -248,6 +253,7 @@
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
+#include <linux/ptrace.h>
#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
@@ -256,8 +262,12 @@
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
+#include <asm/irq_regs.h>
#include <asm/io.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/random.h>
+
/*
* Configuration information
*/
@@ -420,8 +430,10 @@ struct entropy_store {
/* read-write data: */
spinlock_t lock;
unsigned add_ptr;
+ unsigned input_rotate;
int entropy_count;
- int input_rotate;
+ int entropy_total;
+ unsigned int initialized:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -454,6 +466,10 @@ static struct entropy_store nonblocking_
.pool = nonblocking_pool_data
};
+static __u32 const twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
/*
* This function adds bytes into the entropy "pool". It does not
* update the entropy estimate. The caller should call
@@ -464,29 +480,24 @@ static struct entropy_store nonblocking_
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
- int nbytes, __u8 out[64])
+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
{
- static __u32 const twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
int input_rotate;
int wordmask = r->poolinfo->poolwords - 1;
const char *bytes = in;
__u32 w;
- unsigned long flags;
- /* Taps are constant, so we can load them without holding r->lock. */
tap1 = r->poolinfo->tap1;
tap2 = r->poolinfo->tap2;
tap3 = r->poolinfo->tap3;
tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5;
- spin_lock_irqsave(&r->lock, flags);
- input_rotate = r->input_rotate;
- i = r->add_ptr;
+ smp_rmb();
+ input_rotate = ACCESS_ONCE(r->input_rotate);
+ i = ACCESS_ONCE(r->add_ptr);
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
@@ -513,19 +524,61 @@ static void mix_pool_bytes_extract(struc
input_rotate += i ? 7 : 14;
}
- r->input_rotate = input_rotate;
- r->add_ptr = i;
+ ACCESS_ONCE(r->input_rotate) = input_rotate;
+ ACCESS_ONCE(r->add_ptr) = i;
+ smp_wmb();
if (out)
for (j = 0; j < 16; j++)
((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+}
+
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
+ _mix_pool_bytes(r, in, nbytes, out);
+}
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ unsigned long flags;
+
+ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+ spin_lock_irqsave(&r->lock, flags);
+ _mix_pool_bytes(r, in, nbytes, out);
spin_unlock_irqrestore(&r->lock, flags);
}
-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+struct fast_pool {
+ __u32 pool[4];
+ unsigned long last;
+ unsigned short count;
+ unsigned char rotate;
+ unsigned char last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector. It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
- mix_pool_bytes_extract(r, in, bytes, NULL);
+ const char *bytes = in;
+ __u32 w;
+ unsigned i = f->count;
+ unsigned input_rotate = f->rotate;
+
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+ f->pool[(i + 1) & 3];
+ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate += (i++ & 3) ? 7 : 14;
+ }
+ f->count = i;
+ f->rotate = input_rotate;
}
/*
@@ -533,30 +586,38 @@ static void mix_pool_bytes(struct entrop
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- unsigned long flags;
- int entropy_count;
+ int entropy_count, orig;
if (!nbits)
return;
- spin_lock_irqsave(&r->lock, flags);
-
DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
- entropy_count = r->entropy_count;
+retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
entropy_count += nbits;
+
if (entropy_count < 0) {
DEBUG_ENT("negative entropy/overflow\n");
entropy_count = 0;
} else if (entropy_count > r->poolinfo->POOLBITS)
entropy_count = r->poolinfo->POOLBITS;
- r->entropy_count = entropy_count;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+
+ if (!r->initialized && nbits > 0) {
+ r->entropy_total += nbits;
+ if (r->entropy_total > 128)
+ r->initialized = 1;
+ }
+
+ trace_credit_entropy_bits(r->name, nbits, entropy_count,
+ r->entropy_total, _RET_IP_);
/* should we wake readers? */
if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
- spin_unlock_irqrestore(&r->lock, flags);
}
/*********************************************************************
@@ -609,6 +670,25 @@ static void set_timer_rand_state(unsigne
}
#endif
+/*
+ * Add device- or boot-specific data to the input and nonblocking
+ * pools to help initialize them to unique values.
+ *
+ * None of this adds any entropy; it is meant to avoid the
+ * problem of the nonblocking pool having similar initial state
+ * across largely identical devices.
+ */
+void add_device_randomness(const void *buf, unsigned int size)
+{
+ unsigned long time = get_cycles() ^ jiffies;
+
+ mix_pool_bytes(&input_pool, buf, size, NULL);
+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+}
+EXPORT_SYMBOL(add_device_randomness);
+
static struct timer_rand_state input_timer_state;
/*
@@ -637,13 +717,9 @@ static void add_timer_randomness(struct
goto out;
sample.jiffies = jiffies;
-
- /* Use arch random value, fall back to cycles */
- if (!arch_get_random_int(&sample.cycles))
- sample.cycles = get_cycles();
-
+ sample.cycles = get_cycles();
sample.num = num;
- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
/*
* Calculate number of bits of randomness we probably added.
@@ -700,17 +776,48 @@ void add_input_randomness(unsigned int t
}
EXPORT_SYMBOL_GPL(add_input_randomness);
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
{
- struct timer_rand_state *state;
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ __u32 input[4], cycles = get_cycles();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+ if (regs) {
+ __u64 ip = instruction_pointer(regs);
+ input[2] = ip;
+ input[3] = ip >> 32;
+ }
- state = get_timer_rand_state(irq);
+ fast_mix(fast_pool, input, sizeof(input));
- if (state == NULL)
+ if ((fast_pool->count & 1023) &&
+ !time_after(now, fast_pool->last + HZ))
return;
- DEBUG_ENT("irq event %d\n", irq);
- add_timer_randomness(state, 0x100 + irq);
+ fast_pool->last = now;
+
+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+ /*
+ * If we don't have a valid cycle counter, and we see
+ * back-to-back timer interrupts, then skip giving credit for
+ * any entropy.
+ */
+ if (cycles == 0) {
+ if (irq_flags & __IRQF_TIMER) {
+ if (fast_pool->last_timer_intr)
+ return;
+ fast_pool->last_timer_intr = 1;
+ } else
+ fast_pool->last_timer_intr = 0;
+ }
+ credit_entropy_bits(r, 1);
}
#ifdef CONFIG_BLOCK
@@ -742,7 +849,11 @@ static ssize_t extract_entropy(struct en
*/
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
- __u32 tmp[OUTPUT_POOL_WORDS];
+ union {
+ __u32 tmp[OUTPUT_POOL_WORDS];
+ long hwrand[4];
+ } u;
+ int i;
if (r->pull && r->entropy_count < nbytes * 8 &&
r->entropy_count < r->poolinfo->POOLBITS) {
@@ -753,17 +864,22 @@ static void xfer_secondary_pool(struct e
	/* pull at least as many bytes as wakeup bits */
bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
/* but never more than the buffer size */
- bytes = min_t(int, bytes, sizeof(tmp));
+ bytes = min_t(int, bytes, sizeof(u.tmp));
DEBUG_ENT("going to reseed %s with %d bits "
"(%d of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
- bytes = extract_entropy(r->pull, tmp, bytes,
+ bytes = extract_entropy(r->pull, u.tmp, bytes,
random_read_wakeup_thresh / 8, rsvd);
- mix_pool_bytes(r, tmp, bytes);
+ mix_pool_bytes(r, u.tmp, bytes, NULL);
credit_entropy_bits(r, bytes*8);
}
+ for (i = 0; i < 4; i++)
+ if (arch_get_random_long(&u.hwrand[i]))
+ break;
+ if (i)
+ mix_pool_bytes(r, &u.hwrand, i * sizeof(u.hwrand[0]), 0);
}
/*
@@ -822,9 +938,11 @@ static void extract_buf(struct entropy_s
int i;
__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64];
+ unsigned long flags;
/* Generate a hash across the pool, 16 words (512 bits) at a time */
sha_init(hash);
+ spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
sha_transform(hash, (__u8 *)(r->pool + i), workspace);
@@ -837,7 +955,8 @@ static void extract_buf(struct entropy_s
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+ __mix_pool_bytes(r, hash, sizeof(hash), extract);
+ spin_unlock_irqrestore(&r->lock, flags);
/*
* To avoid duplicates, we atomically extract a portion of the
@@ -860,12 +979,12 @@ static void extract_buf(struct entropy_s
}
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
+ size_t nbytes, int min, int reserved)
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -873,6 +992,8 @@ static ssize_t extract_entropy(struct en
extract_buf(r, tmp);
if (fips_enabled) {
+ unsigned long flags;
+
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -898,6 +1019,7 @@ static ssize_t extract_entropy_user(stru
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -931,17 +1053,35 @@ static ssize_t extract_entropy_user(stru
/*
* This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for seeding TCP sequence
- * numbers, etc.
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc. It does not use the hw random number
+ * generator, if available; use get_random_bytes_arch() for that.
*/
void get_random_bytes(void *buf, int nbytes)
{
+ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed to, say, the AES encryption of a sequence number using a
+ * key known by the NSA). So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ */
+void get_random_bytes_arch(void *buf, int nbytes)
+{
char *p = buf;
+ trace_get_random_bytes(nbytes, _RET_IP_);
while (nbytes) {
unsigned long v;
int chunk = min(nbytes, (int)sizeof(unsigned long));
-
+
if (!arch_get_random_long(&v))
break;
@@ -950,9 +1090,11 @@ void get_random_bytes(void *buf, int nby
nbytes -= chunk;
}
- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+ if (nbytes)
+ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
}
-EXPORT_SYMBOL(get_random_bytes);
+EXPORT_SYMBOL(get_random_bytes_arch);
+
/*
* init_std_data - initialize pool with system data
@@ -966,21 +1108,18 @@ EXPORT_SYMBOL(get_random_bytes);
static void init_std_data(struct entropy_store *r)
{
int i;
- ktime_t now;
- unsigned long flags;
+ ktime_t now = ktime_get_real();
+ unsigned long rv;
- spin_lock_irqsave(&r->lock, flags);
r->entropy_count = 0;
- spin_unlock_irqrestore(&r->lock, flags);
-
- now = ktime_get_real();
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
- if (!arch_get_random_long(&flags))
+ r->entropy_total = 0;
+ mix_pool_bytes(r, &now, sizeof(now), NULL);
+ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+ if (!arch_get_random_long(&rv))
break;
- mix_pool_bytes(r, &flags, sizeof(flags));
+ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
}
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
}
static int rand_initialize(void)
@@ -1117,7 +1256,7 @@ write_pool(struct entropy_store *r, cons
count -= bytes;
p += bytes;
- mix_pool_bytes(r, buf, bytes);
+ mix_pool_bytes(r, buf, bytes, NULL);
cond_resched();
}
@@ -1274,6 +1413,7 @@ static int proc_do_uuid(ctl_table *table
}
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+extern ctl_table random_table[];
ctl_table random_table[] = {
{
.procname = "poolsize",
@@ -1339,7 +1479,7 @@ late_initcall(random_int_secret_init);
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
-DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
__u32 *hash;
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(in
u32 fatevent;
int err;
- add_interrupt_randomness(irq);
-
err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
event_regs, 3);
if (err)
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -24,6 +24,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -1896,6 +1897,14 @@ int usb_new_device(struct usb_device *ud
/* Tell the world! */
announce_device(udev);
+ if (udev->serial)
+ add_device_randomness(udev->serial, strlen(udev->serial));
+ if (udev->product)
+ add_device_randomness(udev->product, strlen(udev->product));
+ if (udev->manufacturer)
+ add_device_randomness(udev->manufacturer,
+ strlen(udev->manufacturer));
+
device_enable_async_suspend(&udev->dev);
/* Register the device. The device driver is responsible
* for configuring the device and invoking the add-device
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -50,11 +50,13 @@ struct rnd_state {
extern void rand_initialize_irq(int irq);
+extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
+extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
#ifndef MODULE
--- /dev/null
+++ b/include/trace/events/random.h
@@ -0,0 +1,134 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM random
+
+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RANDOM_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, bytes )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->bytes = bytes;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: bytes %d caller %pF",
+ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP)
+);
+
+TRACE_EVENT(credit_entropy_bits,
+ TP_PROTO(const char *pool_name, int bits, int entropy_count,
+ int entropy_total, unsigned long IP),
+
+ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, bits )
+ __field( int, entropy_count )
+ __field( int, entropy_total )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->bits = bits;
+ __entry->entropy_count = entropy_count;
+ __entry->entropy_total = entropy_total;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
+ "caller %pF", __entry->pool_name, __entry->bits,
+ __entry->entropy_count, __entry->entropy_total,
+ (void *)__entry->IP)
+);
+
+TRACE_EVENT(get_random_bytes,
+ TP_PROTO(int nbytes, unsigned long IP),
+
+ TP_ARGS(nbytes, IP),
+
+ TP_STRUCT__entry(
+ __field( int, nbytes )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->nbytes = nbytes;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+);
+
+DECLARE_EVENT_CLASS(random__extract_entropy,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, nbytes )
+ __field( int, entropy_count )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->nbytes = nbytes;
+ __entry->entropy_count = entropy_count;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
+ (void *)__entry->IP)
+);
+
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+
+
+#endif /* _TRACE_RANDOM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -117,7 +117,7 @@ irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
irqreturn_t retval = IRQ_NONE;
- unsigned int random = 0, irq = desc->irq_data.irq;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
do {
irqreturn_t res;
@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc
/* Fall through to add to randomness */
case IRQ_HANDLED:
- random |= action->flags;
+ flags |= action->flags;
break;
default:
@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc
action = action->next;
} while (action);
- if (random & IRQF_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
+ add_interrupt_randomness(irq, flags);
if (!noirqdebug)
note_interrupt(irq, desc, retval);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1176,6 +1176,7 @@ static int __dev_open(struct net_device
net_dmaengine_get();
dev_set_rx_mode(dev);
dev_activate(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
return ret;
@@ -4823,6 +4824,7 @@ int dev_set_mac_address(struct net_devic
err = ops->ndo_set_mac_address(dev, sa);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
@@ -5602,6 +5604,7 @@ int register_netdevice(struct net_device
dev_init_scheduler(dev);
dev_hold(dev);
list_netdevice(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1371,6 +1371,7 @@ static int do_setlink(struct net_device
goto errout;
send_addr_notify = 1;
modified = 1;
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
if (tb[IFLA_MTU]) {

View file

@@ -1,543 +0,0 @@
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -291,8 +291,7 @@ palmz71_gpio_setup(int early)
}
gpio_direction_input(PALMZ71_USBDETECT_GPIO);
if (request_irq(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
- palmz71_powercable, IRQF_SAMPLE_RANDOM,
- "palmz71-cable", 0))
+ palmz71_powercable, 0, "palmz71-cable", 0))
printk(KERN_ERR
"IRQ request for power cable failed!\n");
palmz71_powercable(gpio_to_irq(PALMZ71_USBDETECT_GPIO), 0);
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -455,7 +455,7 @@ static int lubbock_mci_init(struct devic
init_timer(&mmc_timer);
mmc_timer.data = (unsigned long) data;
return request_irq(LUBBOCK_SD_IRQ, lubbock_detect_int,
- IRQF_SAMPLE_RANDOM, "lubbock-sd-detect", data);
+ 0, "lubbock-sd-detect", data);
}
static int lubbock_mci_get_ro(struct device *dev)
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -617,9 +617,8 @@ static struct platform_device bq24022 =
static int magician_mci_init(struct device *dev,
irq_handler_t detect_irq, void *data)
{
- return request_irq(IRQ_MAGICIAN_SD, detect_irq,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
- "mmc card detect", data);
+ return request_irq(IRQ_MAGICIAN_SD, detect_irq, IRQF_DISABLED,
+ "mmc card detect", data);
}
static void magician_mci_exit(struct device *dev, void *data)
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -332,8 +332,8 @@ static int trizeps4_mci_init(struct devi
int err;
err = request_irq(TRIZEPS4_MMC_IRQ, mci_detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_SAMPLE_RANDOM,
- "MMC card detect", data);
+ IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ "MMC card detect", data);
if (err) {
printk(KERN_ERR "trizeps4_mci_init: MMC/SD: can't request"
"MMC card detect IRQ\n");
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -23,7 +23,6 @@
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
-#include <linux/random.h> /* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1250,14 +1250,12 @@ int ldc_bind(struct ldc_channel *lp, con
snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
- err = request_irq(lp->cfg.rx_irq, ldc_rx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
+ err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED,
lp->rx_irq_name, lp);
if (err)
return err;
- err = request_irq(lp->cfg.tx_irq, ldc_tx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
+ err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -371,7 +371,7 @@ static irqreturn_t line_write_interrupt(
int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
const struct line_driver *driver = line->driver;
- int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM;
+ int err = 0, flags = IRQF_DISABLED | IRQF_SHARED;
if (input)
err = um_request_irq(driver->read_irq, fd, IRQ_READ,
@@ -807,7 +807,7 @@ void register_winch_irq(int fd, int tty_
.stack = stack });
if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+ IRQF_DISABLED | IRQF_SHARED,
"winch", winch) < 0) {
printk(KERN_ERR "register_winch_irq - failed to register "
"IRQ\n");
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -773,7 +773,7 @@ static int __init mconsole_init(void)
register_reboot_notifier(&reboot_notifier);
err = um_request_irq(MCONSOLE_IRQ, sock, IRQ_READ, mconsole_interrupt,
- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+ IRQF_DISABLED | IRQF_SHARED,
"mconsole", (void *)sock);
if (err) {
printk(KERN_ERR "Failed to get IRQ for management console\n");
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -100,7 +100,7 @@ static int port_accept(struct port_list
.port = port });
if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+ IRQF_DISABLED | IRQF_SHARED,
"telnetd", conn)) {
printk(KERN_ERR "port_accept : failed to get IRQ for "
"telnetd\n");
@@ -184,7 +184,7 @@ void *port_data(int port_num)
}
if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+ IRQF_DISABLED | IRQF_SHARED,
"port", port)) {
printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
goto out_close;
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -131,8 +131,7 @@ static int __init rng_init (void)
random_fd = err;
err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "random",
- NULL);
+ IRQF_DISABLED, "random", NULL);
if (err)
goto err_out_cleanup_hw;
--- a/arch/um/drivers/xterm_kern.c
+++ b/arch/um/drivers/xterm_kern.c
@@ -50,8 +50,7 @@ int xterm_fd(int socket, int *pid_out)
init_completion(&data->ready);
err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt,
- IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- "xterm", data);
+ IRQF_DISABLED | IRQF_SHARED, "xterm", data);
if (err) {
printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, "
"err = %d\n", err);
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -25,8 +25,7 @@ int write_sigio_irq(int fd)
int err;
err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
- IRQF_DISABLED|IRQF_SAMPLE_RANDOM, "write sigio",
- NULL);
+ IRQF_DISABLED, "write sigio", NULL);
if (err) {
printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
"err = %d\n", err);
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -71,20 +71,6 @@ Who: Luis R. Rodriguez <lrodriguez@ather
---------------------------
-What: IRQF_SAMPLE_RANDOM
-Check: IRQF_SAMPLE_RANDOM
-When: July 2009
-
-Why: Many of IRQF_SAMPLE_RANDOM users are technically bogus as entropy
- sources in the kernel's current entropy model. To resolve this, every
- input point to the kernel's entropy pool needs to better document the
- type of entropy source it actually is. This will be replaced with
- additional add_*_randomness functions in drivers/char/random.c
-
-Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
-
----------------------------
-
What: The ieee80211_regdom module parameter
When: March 2010 / desktop catchup
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -852,9 +852,8 @@ static int setup_blkring(struct xenbus_d
if (err)
goto fail;
- err = bind_evtchn_to_irqhandler(info->evtchn,
- blkif_interrupt,
- IRQF_SAMPLE_RANDOM, "blkif", info);
+ err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
+ "blkif", info);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -633,43 +633,6 @@ struct timer_rand_state {
unsigned dont_count_entropy:1;
};
-#ifndef CONFIG_GENERIC_HARDIRQS
-
-static struct timer_rand_state *irq_timer_state[NR_IRQS];
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
- return irq_timer_state[irq];
-}
-
-static void set_timer_rand_state(unsigned int irq,
- struct timer_rand_state *state)
-{
- irq_timer_state[irq] = state;
-}
-
-#else
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
- return desc->timer_rand_state;
-}
-
-static void set_timer_rand_state(unsigned int irq,
- struct timer_rand_state *state)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
- desc->timer_rand_state = state;
-}
-#endif
-
/*
* Add device- or boot-specific data to the input and nonblocking
* pools to help initialize them to unique values.
@@ -1131,24 +1094,6 @@ static int rand_initialize(void)
}
module_init(rand_initialize);
-void rand_initialize_irq(int irq)
-{
- struct timer_rand_state *state;
-
- state = get_timer_rand_state(irq);
-
- if (state)
- return;
-
- /*
- * If kzalloc returns null, we just won't use that entropy
- * source.
- */
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state)
- set_timer_rand_state(irq, state);
-}
-
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1607,8 +1607,7 @@ static int spu_map_ino(struct platform_d
sprintf(p->irq_name, "%s-%d", irq_name, index);
- return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
- p->irq_name, p);
+ return request_irq(p->irq, handler, 0, p->irq_name, p);
}
static struct kmem_cache *queue_cache[2];
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -545,8 +545,7 @@ static int vmbus_bus_init(int irq)
if (ret)
goto err_cleanup;
- ret = request_irq(irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
- driver_name, hv_acpi_dev);
+ ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
if (ret != 0) {
pr_err("Unable to request IRQ %d\n",
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -306,8 +306,7 @@ static int __devinit pmcmsptwi_probe(str
pmcmsptwi_data.irq = platform_get_irq(pldev, 0);
if (pmcmsptwi_data.irq) {
rc = request_irq(pmcmsptwi_data.irq, &pmcmsptwi_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- pldev->name, &pmcmsptwi_data);
+ IRQF_SHARED, pldev->name, &pmcmsptwi_data);
if (rc == 0) {
/*
* Enable 'DONE' interrupt only.
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -879,7 +879,7 @@ static int __init hp_sdc_init(void)
#endif
errstr = "IRQ not available for";
- if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED|IRQF_SAMPLE_RANDOM,
+ if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED,
"HP SDC", &hp_sdc))
goto err1;
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -937,9 +937,6 @@ static int __devinit ab3100_probe(struct
err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
IRQF_ONESHOT, "ab3100-core", ab3100);
- /* This real unpredictable IRQ is of course sampled for entropy */
- rand_initialize_irq(client->irq);
-
if (err)
goto exit_no_irq;
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -563,8 +563,7 @@ static int tps65010_probe(struct i2c_cli
*/
if (client->irq > 0) {
status = request_irq(client->irq, tps65010_irq,
- IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_FALLING,
- DRIVER_NAME, tps);
+ IRQF_TRIGGER_FALLING, DRIVER_NAME, tps);
if (status < 0) {
dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
client->irq, status);
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -9415,7 +9415,7 @@ static int tg3_test_interrupt(struct tg3
}
err = request_irq(tnapi->irq_vec, tg3_test_isr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
+ IRQF_SHARED, dev->name, tnapi);
if (err)
return err;
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -24,11 +24,7 @@
static inline unsigned int get_irq_flags(struct resource *res)
{
- unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
-
- flags |= res->flags & IRQF_TRIGGER_MASK;
-
- return flags;
+ return IRQF_SHARED | (res->flags & IRQF_TRIGGER_MASK);
}
static struct device *dev;
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -216,8 +216,7 @@ static int ulite_startup(struct uart_por
{
int ret;
- ret = request_irq(port->irq, ulite_isr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM, "uartlite", port);
+ ret = request_irq(port->irq, ulite_isr, IRQF_SHARED, "uartlite", port);
if (ret)
return ret;
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1839,7 +1839,7 @@ static int goku_probe(struct pci_dev *pd
/* init to known state, then setup irqs */
udc_reset(dev);
udc_reinit (dev);
- if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
+ if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
driver_name, dev) != 0) {
DBG(dev, "request interrupt %d failed\n", pdev->irq);
retval = -EBUSY;
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2943,7 +2943,7 @@ known:
/* USB general purpose IRQ: ep0, state changes, dma, etc */
status = request_irq(pdev->resource[1].start, omap_udc_irq,
- IRQF_SAMPLE_RANDOM, driver_name, udc);
+ 0, driver_name, udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[1].start, status);
@@ -2952,7 +2952,7 @@ known:
/* USB "non-iso" IRQ (PIO for all but ep0) */
status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
- IRQF_SAMPLE_RANDOM, "omap_udc pio", udc);
+ 0, "omap_udc pio", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[2].start, status);
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2202,19 +2202,15 @@ static int __init pxa25x_udc_probe(struc
#ifdef CONFIG_ARCH_LUBBOCK
if (machine_is_lubbock()) {
- retval = request_irq(LUBBOCK_USB_DISC_IRQ,
- lubbock_vbus_irq,
- IRQF_SAMPLE_RANDOM,
- driver_name, dev);
+ retval = request_irq(LUBBOCK_USB_DISC_IRQ, lubbock_vbus_irq,
+ 0, driver_name, dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
driver_name, LUBBOCK_USB_DISC_IRQ, retval);
goto err_irq_lub;
}
- retval = request_irq(LUBBOCK_USB_IRQ,
- lubbock_vbus_irq,
- IRQF_SAMPLE_RANDOM,
- driver_name, dev);
+ retval = request_irq(LUBBOCK_USB_IRQ, lubbock_vbus_irq,
+ 0, driver_name, dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
driver_name, LUBBOCK_USB_IRQ, retval);
--- a/drivers/usb/otg/gpio_vbus.c
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -51,8 +51,7 @@ struct gpio_vbus_data {
* edges might be workable.
*/
#define VBUS_IRQ_FLAGS \
- ( IRQF_SAMPLE_RANDOM | IRQF_SHARED \
- | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING )
+ ( IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING )
/* interface to regulator framework */
@@ -253,7 +252,7 @@ static int __init gpio_vbus_probe(struct
if (res) {
irq = res->start;
res->flags &= IRQF_TRIGGER_MASK;
- res->flags |= IRQF_SAMPLE_RANDOM | IRQF_SHARED;
+ res->flags |= IRQF_SHARED;
} else
irq = gpio_to_irq(gpio);
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1567,7 +1567,6 @@ isp1301_probe(struct i2c_client *i2c, co
isp->irq_type = IRQF_TRIGGER_FALLING;
}
- isp->irq_type |= IRQF_SAMPLE_RANDOM;
status = request_irq(i2c->irq, isp1301_irq,
isp->irq_type, DRIVER_NAME, isp);
if (status < 0) {
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -44,7 +44,6 @@
*
* IRQF_DISABLED - keep irqs disabled when calling the action handler.
* DEPRECATED. This flag is a NOOP and scheduled to be removed
- * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
* IRQF_SHARED - allow sharing the irq among several devices
* IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
* IRQF_TIMER - Flag to mark this interrupt as timer interrupt
@@ -63,7 +62,6 @@
* resume time.
*/
#define IRQF_DISABLED 0x00000020
-#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -39,7 +39,6 @@ struct module;
*/
struct irq_desc {
struct irq_data irq_data;
- struct timer_rand_state *timer_rand_state;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -48,8 +48,6 @@ struct rnd_state {
#ifdef __KERNEL__
-extern void rand_initialize_irq(int irq);
-
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -891,22 +891,6 @@ __setup_irq(unsigned int irq, struct irq
return -ENOSYS;
if (!try_module_get(desc->owner))
return -ENODEV;
- /*
- * Some drivers like serial.c use request_irq() heavily,
- * so we have to be careful not to interfere with a
- * running system.
- */
- if (new->flags & IRQF_SAMPLE_RANDOM) {
- /*
- * This function might sleep, we want to call it first,
- * outside of the atomic block.
- * Yes, this might clear the entropy pool if the wrong
- * driver is attempted to be loaded, without actually
- * installing a new handler, but is this really a problem,
- * only the sysadmin is able to do this.
- */
- rand_initialize_irq(irq);
- }
/*
* Check whether the interrupt nests into another interrupt
@@ -1342,7 +1326,6 @@ EXPORT_SYMBOL(free_irq);
* Flags:
*
* IRQF_SHARED Interrupt is shared
- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
* IRQF_TRIGGER_* Specify active edge(s) or level
*
*/

View file

@@ -1,36 +0,0 @@
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -476,8 +476,10 @@ static const struct usb_device_id hso_id
{USB_DEVICE(0x0af0, 0x8400)},
{USB_DEVICE(0x0af0, 0x8600)},
{USB_DEVICE(0x0af0, 0x8800)},
- {USB_DEVICE(0x0af0, 0x8900)},
- {USB_DEVICE(0x0af0, 0x9000)},
+ {USB_DEVICE(0x0af0, 0x8900)}, /* GTM 67xx */
+ {USB_DEVICE(0x0af0, 0x9000)}, /* GTM 66xx */
+ {USB_DEVICE(0x0af0, 0x9200)}, /* GTM 67xxWFS */
+ {USB_DEVICE(0x0af0, 0x9300)}, /* GTM 66xxWFS */
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1231,6 +1231,18 @@ UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
0 ),
+UNUSUAL_DEV( 0x0af0, 0x9200, 0x0000, 0x0000,
+ "Option",
+ "Globetrotter 67xxWFS SD-Card",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x9300, 0x0000, 0x0000,
+ "Option",
+ "Globetrotter 66xxWFS SD-Card",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 0 ),
+
UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
"Option",
"GI 070x SD-Card",

View file

@ -1,61 +0,0 @@
From patchwork Mon Aug 6 21:04:43 2012
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [net-next] tcp: ecn: dont delay ACKS after CE
Date: Mon, 06 Aug 2012 11:04:43 -0000
From: Eric Dumazet <eric.dumazet@gmail.com>
X-Patchwork-Id: 175453
Message-Id: <1344287083.26674.83.camel@edumazet-glaptop>
To: David Miller <davem@davemloft.net>
Cc: netdev <netdev@vger.kernel.org>,
Neal Cardwell <ncardwell@google.com>
From: Eric Dumazet <edumazet@google.com>
While playing with CoDel and ECN marking, I discovered a
non-optimal behavior of the receiver of CE (Congestion Experienced)
segments.
In pathological cases, the sender has reduced its cwnd to low values,
and the receiver delays its ACK (by 40 ms).
While RFC 3168 6.1.3 (The TCP Receiver) doesn't explicitly recommend
sending immediate ACKs, we believe it's better not to delay ACKs, because
a CE segment should give the same signal as a dropped segment, and it's
quite important to reduce the RTT so ECE/CWR signals arrive as fast as
possible.
Note we already call tcp_enter_quickack_mode() from TCP_ECN_check_ce()
if we receive a retransmit, for the same reason.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
---
net/ipv4/tcp_input.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -231,7 +231,11 @@ static inline void TCP_ECN_check_ce(stru
tcp_enter_quickack_mode((struct sock *)tp);
break;
case INET_ECN_CE:
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+ /* Better not delay acks, sender can have a very low cwnd */
+ tcp_enter_quickack_mode((struct sock *)tp);
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ }
/* fall through */
default:
tp->ecn_flags |= TCP_ECN_SEEN;

View file

@@ -1,36 +0,0 @@
From b379135c40163ae79ba7a54e6928b53983e74ee8 Mon Sep 17 00:00:00 2001
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Sat, 1 Sep 2012 03:19:57 +0000
Subject: [PATCH 307/558] fq_codel: dont reinit flow state
When fq_codel builds a new flow, it should not reset the codel state:
the Codel algorithm needs the previous values (lastcount, drop_next) to
behave properly.
Signed-off-by: Dave Taht <dave.taht@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Dave Taht <dave.taht@bufferbloat.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/sched/sch_fq_codel.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -191,7 +191,6 @@ static int fq_codel_enqueue(struct sk_bu
if (list_empty(&flow->flowchain)) {
list_add_tail(&flow->flowchain, &q->new_flows);
- codel_vars_init(&flow->cvars);
q->new_flow_count++;
flow->deficit = q->quantum;
flow->dropped = 0;
@@ -418,6 +417,7 @@ static int fq_codel_init(struct Qdisc *s
struct fq_codel_flow *flow = q->flows + i;
INIT_LIST_HEAD(&flow->flowchain);
+ codel_vars_init(&flow->cvars);
}
}
if (sch->limit >= 1)

View file

@@ -1,187 +0,0 @@
commit 01ffc0a7f1c1801a2354719dedbc32aff45b987d
Author: David Woodhouse <dwmw2@infradead.org>
Date: Sat Nov 24 12:11:21 2012 +0000
8139cp: re-enable interrupts after tx timeout
Recovery doesn't work too well if we leave interrupts disabled...
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 871f0d4c153e1258d4becf306eca6761bf38b629
Author: David Woodhouse <dwmw2@infradead.org>
Date: Thu Nov 22 03:16:58 2012 +0000
8139cp: enable bql
This adds support for byte queue limits on RTL8139C+
Tested on real hardware.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-By: Dave Täht <dave.taht@bufferbloat.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a9dbe40fc10cea2efe6e1ff9e03c62dd7579c5ba
Author: David Woodhouse <dwmw2@infradead.org>
Date: Wed Nov 21 10:27:19 2012 +0000
8139cp: set ring address after enabling C+ mode
This fixes (for me) a regression introduced by commit b01af457 ("8139cp:
set ring address before enabling receiver"). That commit configured the
descriptor ring addresses earlier in the initialisation sequence, in
order to avoid the possibility of triggering stray DMA before the
correct address had been set up.
Unfortunately, it seems that the hardware will scribble garbage into the
TxRingAddr registers when we enable "plus mode" Tx in the CpCmd
register. Observed on a Traverse Geos router board.
To deal with this, while not reintroducing the problem which led to the
original commit, we augment cp_start_hw() to write to the CpCmd register
*first*, then set the descriptor ring addresses, and finally enable
Rx and Tx in the original 8139 Cmd register. The datasheet
actually indicates that we should enable Tx/Rx in the Cmd register
*before* configuring the descriptor addresses, but that would appear to
re-introduce the problem that the offending commit b01af457 was trying
to solve. And this variant appears to work fine on real hardware.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Cc: stable@kernel.org [3.5+]
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 071e3ef4a94a021b16a2912f3885c86f4ff36b49
Author: David S. Miller <davem@davemloft.net>
Date: Sun Nov 25 15:52:09 2012 -0500
Revert "8139cp: revert "set ring address before enabling receiver""
This reverts commit b26623dab7eeb1e9f5898c7a49458789dd492f20.
This reverts the revert, in net-next we'll try another scheme
to fix this bug using patches from David Woodhouse.
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -645,6 +645,7 @@ static void cp_tx (struct cp_private *cp
{
unsigned tx_head = cp->tx_head;
unsigned tx_tail = cp->tx_tail;
+ unsigned bytes_compl = 0, pkts_compl = 0;
while (tx_tail != tx_head) {
struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -663,6 +664,9 @@ static void cp_tx (struct cp_private *cp
le32_to_cpu(txd->opts1) & 0xffff,
PCI_DMA_TODEVICE);
+ bytes_compl += skb->len;
+ pkts_compl++;
+
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
netif_dbg(cp, tx_err, cp->dev,
@@ -694,6 +698,7 @@ static void cp_tx (struct cp_private *cp
cp->tx_tail = tx_tail;
+ netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
netif_wake_queue(cp->dev);
}
@@ -840,6 +845,8 @@ static netdev_tx_t cp_start_xmit (struct
wmb();
}
cp->tx_head = entry;
+
+ netdev_sent_queue(dev, skb->len);
netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -934,6 +941,8 @@ static void cp_stop_hw (struct cp_privat
cp->rx_tail = 0;
cp->tx_head = cp->tx_tail = 0;
+
+ netdev_reset_queue(cp->dev);
}
static void cp_reset_hw (struct cp_private *cp)
@@ -954,8 +963,38 @@ static void cp_reset_hw (struct cp_priva
static inline void cp_start_hw (struct cp_private *cp)
{
+ dma_addr_t ring_dma;
+
cpw16(CpCmd, cp->cpcmd);
+
+ /*
+ * These (at least TxRingAddr) need to be configured after the
+ * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+ * (C+ Command Register) recommends that these and more be configured
+ * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+ * it's been observed that the TxRingAddr is actually reset to garbage
+ * when C+ mode Tx is enabled in CpCmd.
+ */
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ /*
+ * Strictly speaking, the datasheet says this should be enabled
+ * *before* setting the descriptor addresses. But what, then, would
+ * prevent it from doing DMA to random unconfigured addresses?
+ * This variant appears to work fine.
+ */
cpw8(Cmd, RxOn | TxOn);
+
+ netdev_reset_queue(cp->dev);
}
static void cp_enable_irq(struct cp_private *cp)
@@ -966,7 +1005,6 @@ static void cp_enable_irq(struct cp_priv
static void cp_init_hw (struct cp_private *cp)
{
struct net_device *dev = cp->dev;
- dma_addr_t ring_dma;
cp_reset_hw(cp);
@@ -989,17 +1027,6 @@ static void cp_init_hw (struct cp_privat
cpw8(Config5, cpr8(Config5) & PMEStatus);
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1188,6 +1215,7 @@ static void cp_tx_timeout(struct net_dev
cp_clean_rings(cp);
rc = cp_init_rings(cp);
cp_start_hw(cp);
+ cp_enable_irq(cp);
netif_wake_queue(dev);

View file

@@ -1,68 +0,0 @@
From fc3a1f04f5040255cbc086c419e4237f29f89f88 Mon Sep 17 00:00:00 2001
From: Wolfram Sang <w.sang@pengutronix.de>
Date: Tue, 13 Dec 2011 18:34:01 +0100
Subject: [PATCH] gpio: add flags to export GPIOs when requesting
commit fc3a1f04f5040255cbc086c419e4237f29f89f88 upstream.
Introduce new flags to automatically export GPIOs when using the convenience
functions gpio_request_one() or gpio_request_array(). This eases support for
custom boards where lots of GPIOs need to be exported for customer
applications.
Signed-off-by: Wolfram Sang <w.sang@pengutronix.de>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
---
Documentation/gpio.txt | 3 +++
drivers/gpio/gpiolib.c | 12 +++++++++++-
include/linux/gpio.h | 5 +++++
3 files changed, 19 insertions(+), 1 deletion(-)
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -303,6 +303,9 @@ where 'flags' is currently defined to sp
* GPIOF_INIT_LOW - as output, set initial level to LOW
* GPIOF_INIT_HIGH - as output, set initial level to HIGH
+ * GPIOF_EXPORT_DIR_FIXED - export gpio to sysfs, keep direction
+ * GPIOF_EXPORT_DIR_CHANGEABLE - also export, allow changing direction
+
since GPIOF_INIT_* are only valid when configured as output, so group valid
combinations as:
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1289,8 +1289,18 @@ int gpio_request_one(unsigned gpio, unsi
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
- gpio_free(gpio);
+ goto free_gpio;
+ if (flags & GPIOF_EXPORT) {
+ err = gpio_export(gpio, flags & GPIOF_EXPORT_CHANGEABLE);
+ if (err)
+ goto free_gpio;
+ }
+
+ return 0;
+
+ free_gpio:
+ gpio_free(gpio);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -14,6 +14,11 @@
#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+#define GPIOF_EXPORT (1 << 2)
+#define GPIOF_EXPORT_CHANGEABLE (1 << 3)
+#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
+#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
+
/**
* struct gpio - a structure describing a GPIO with configuration
* @gpio: the GPIO number
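
As a usage sketch of the new flags (the GPIO number and label below are
invented for illustration, not taken from the patch), a board driver could
request, configure, and export a pin in one call:

	#include <linux/gpio.h>

	/* Request GPIO 42 as an output driven low and export it to sysfs,
	 * allowing userspace to flip its direction later. Number and label
	 * are illustrative only. */
	static int example_gpio_setup(void)
	{
		return gpio_request_one(42,
				GPIOF_OUT_INIT_LOW | GPIOF_EXPORT_DIR_CHANGEABLE,
				"example-led");
	}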

View file

@@ -1,27 +0,0 @@
This was added in commit 46d3ceabd8d98ed0ad10f20c595ca784e34786c5 (tcp:
TCP Small Queues) but we need it for pppoatm too.
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -810,6 +810,8 @@ struct proto {
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
+ void (*release_cb)(struct sock *sk);
+
/* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2138,6 +2138,10 @@ void release_sock(struct sock *sk)
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_backlog.tail)
__release_sock(sk);
+
+ if (sk->sk_prot->release_cb)
+ sk->sk_prot->release_cb(sk);
+
sk->sk_lock.owned = 0;
if (waitqueue_active(&sk->sk_lock.wq))
wake_up(&sk->sk_lock.wq);
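
For context, a hedged sketch of how a protocol hooks this callback (all
names here are invented; the in-tree user this hook was written for is
TCP's release_cb, and pppoatm adds its own):

	/* Sketch: release_cb runs in release_sock() after the backlog is
	 * processed, letting a protocol flush work it deferred while the
	 * socket was owned by the user. */
	static void example_release_cb(struct sock *sk)
	{
		/* e.g. transmit frames queued while sock_owned_by_user(sk) */
	}

	static struct proto example_proto = {
		.name		= "EXAMPLE",
		.owner		= THIS_MODULE,
		.release_cb	= example_release_cb,
		/* remaining mandatory proto ops omitted from this sketch */
	};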

View file

@@ -1,271 +0,0 @@
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -393,6 +393,16 @@ config USB_CNS3XXX_OHCI
Enable support for the CNS3XXX SOC's on-chip OHCI controller.
It is needed for low-speed USB 1.0 device support.
+config USB_OHCI_HCD_PLATFORM
+ bool "Generic OHCI driver for a platform device"
+ depends on USB_OHCI_HCD && EXPERIMENTAL
+ default n
+ ---help---
+ Adds an OHCI host driver for a generic platform device, which
+ provides a memory space and an irq.
+
+ If unsure, say N.
+
config USB_OHCI_BIG_ENDIAN_DESC
bool
depends on USB_OHCI_HCD
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1121,6 +1121,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_xls_driver
#endif
+#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
+#include "ohci-platform.c"
+#define PLATFORM_DRIVER ohci_platform_driver
+#endif
+
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
!defined(OMAP1_PLATFORM_DRIVER) && \
--- /dev/null
+++ b/drivers/usb/host/ohci-platform.c
@@ -0,0 +1,194 @@
+/*
+ * Generic platform ohci driver
+ *
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Derived from the OHCI-SSB driver
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+#include <linux/platform_device.h>
+#include <linux/usb/ohci_pdriver.h>
+
+static int ohci_platform_reset(struct usb_hcd *hcd)
+{
+ struct platform_device *pdev = to_platform_device(hcd->self.controller);
+ struct usb_ohci_pdata *pdata = pdev->dev.platform_data;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int err;
+
+ if (pdata->big_endian_desc)
+ ohci->flags |= OHCI_QUIRK_BE_DESC;
+ if (pdata->big_endian_mmio)
+ ohci->flags |= OHCI_QUIRK_BE_MMIO;
+ if (pdata->no_big_frame_no)
+ ohci->flags |= OHCI_QUIRK_FRAME_NO;
+
+ ohci_hcd_init(ohci);
+ err = ohci_init(ohci);
+
+ return err;
+}
+
+static int ohci_platform_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int err;
+
+ err = ohci_run(ohci);
+ if (err < 0) {
+ ohci_err(ohci, "can't start\n");
+ ohci_stop(hcd);
+ }
+
+ return err;
+}
+
+static const struct hc_driver ohci_platform_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Generic Platform OHCI Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY | HCD_USB11,
+
+ .reset = ohci_platform_reset,
+ .start = ohci_platform_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ .get_frame_number = ohci_get_frame,
+
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int __devinit ohci_platform_probe(struct platform_device *dev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res_mem;
+ int irq;
+ int err = -ENOMEM;
+
+ BUG_ON(!dev->dev.platform_data);
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ pr_err("no irq provieded");
+ return irq;
+ }
+
+ res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res_mem) {
+ pr_err("no memory recourse provieded");
+ return -ENXIO;
+ }
+
+ hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ pr_err("controller already in use");
+ err = -EBUSY;
+ goto err_put_hcd;
+ }
+
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs)
+ goto err_release_region;
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err)
+ goto err_iounmap;
+
+ platform_set_drvdata(dev, hcd);
+
+ return err;
+
+err_iounmap:
+ iounmap(hcd->regs);
+err_release_region:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_put_hcd:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int __devexit ohci_platform_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ platform_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int ohci_platform_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int ohci_platform_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ ohci_finish_controller_resume(hcd);
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define ohci_platform_suspend NULL
+#define ohci_platform_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct platform_device_id ohci_platform_table[] = {
+ { "ohci-platform", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ohci_platform_table);
+
+static const struct dev_pm_ops ohci_platform_pm_ops = {
+ .suspend = ohci_platform_suspend,
+ .resume = ohci_platform_resume,
+};
+
+static struct platform_driver ohci_platform_driver = {
+ .id_table = ohci_platform_table,
+ .probe = ohci_platform_probe,
+ .remove = __devexit_p(ohci_platform_remove),
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ohci-platform",
+ .pm = &ohci_platform_pm_ops,
+ }
+};
--- /dev/null
+++ b/include/linux/usb/ohci_pdriver.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __USB_CORE_OHCI_PDRIVER_H
+#define __USB_CORE_OHCI_PDRIVER_H
+
+/**
+ * struct usb_ohci_pdata - platform_data for generic ohci driver
+ *
+ * @big_endian_desc: BE descriptors
+ * @big_endian_mmio: BE registers
+ * @no_big_frame_no: no big endian frame_no shift
+ *
+ * These are general configuration options for the OHCI controller. All
+ * of these options essentially enable workarounds for specific hardware.
+ */
+struct usb_ohci_pdata {
+ unsigned big_endian_desc:1;
+ unsigned big_endian_mmio:1;
+ unsigned no_big_frame_no:1;
+};
+
+#endif /* __USB_CORE_OHCI_PDRIVER_H */
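
Taken together, a board file would register a device named "ohci-platform"
(matching the id_table above) that carries a usb_ohci_pdata plus MMIO and
IRQ resources. A minimal sketch, with an invented base address and IRQ
number:

	#include <linux/platform_device.h>
	#include <linux/usb/ohci_pdriver.h>

	/* All quirk flags off; the driver only requires that platform_data
	 * exists (it is dereferenced unconditionally in the reset hook). */
	static struct usb_ohci_pdata example_ohci_pdata;

	static struct resource example_ohci_res[] = {
		{
			.start	= 0x18009000,	/* invented MMIO base */
			.end	= 0x18009fff,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 11,		/* invented IRQ number */
			.end	= 11,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_ohci_dev = {
		.name		= "ohci-platform", /* must match the id_table */
		.id		= -1,
		.resource	= example_ohci_res,
		.num_resources	= ARRAY_SIZE(example_ohci_res),
		.dev = {
			.platform_data	= &example_ohci_pdata,
		},
	};

	/* Registered from board setup with platform_device_register(). */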

View file

@@ -1,283 +0,0 @@
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -403,6 +403,16 @@ config USB_OHCI_HCD_PLATFORM
If unsure, say N.
+config USB_EHCI_HCD_PLATFORM
+ bool "Generic EHCI driver for a platform device"
+ depends on USB_EHCI_HCD && EXPERIMENTAL
+ default n
+ ---help---
+ Adds an EHCI host driver for a generic platform device, which
+ provides a memory space and an irq.
+
+ If unsure, say N.
+
config USB_OHCI_BIG_ENDIAN_DESC
bool
depends on USB_OHCI_HCD
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1381,6 +1381,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_mv_driver
#endif
+#ifdef CONFIG_USB_EHCI_HCD_PLATFORM
+#include "ehci-platform.c"
+#define PLATFORM_DRIVER ehci_platform_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
--- /dev/null
+++ b/drivers/usb/host/ehci-platform.c
@@ -0,0 +1,198 @@
+/*
+ * Generic platform ehci driver
+ *
+ * Copyright 2007 Steven Brown <sbrown@cortland.com>
+ * Copyright 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Derived from the ohci-ssb driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the EHCI-PCI driver
+ * Copyright (c) 2000-2004 by David Brownell
+ *
+ * Derived from the ohci-pci driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+#include <linux/platform_device.h>
+#include <linux/usb/ehci_pdriver.h>
+
+static int ehci_platform_reset(struct usb_hcd *hcd)
+{
+ struct platform_device *pdev = to_platform_device(hcd->self.controller);
+ struct usb_ehci_pdata *pdata = pdev->dev.platform_data;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ hcd->has_tt = pdata->has_tt;
+ ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
+ ehci->big_endian_desc = pdata->big_endian_desc;
+ ehci->big_endian_mmio = pdata->big_endian_mmio;
+
+ ehci->caps = hcd->regs + pdata->caps_offset;
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ if (pdata->port_power_on)
+ ehci_port_power(ehci, 1);
+ if (pdata->port_power_off)
+ ehci_port_power(ehci, 0);
+
+ return 0;
+}
+
+static const struct hc_driver ehci_platform_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Generic Platform EHCI Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ .reset = ehci_platform_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ .get_frame_number = ehci_get_frame,
+
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#if defined(CONFIG_PM)
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .update_device = ehci_update_device,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int __devinit ehci_platform_probe(struct platform_device *dev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res_mem;
+ int irq;
+ int err = -ENOMEM;
+
+ BUG_ON(!dev->dev.platform_data);
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ pr_err("no irq provieded");
+ return irq;
+ }
+ res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res_mem) {
+ pr_err("no memory recourse provieded");
+ return -ENXIO;
+ }
+
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ pr_err("controller already in use");
+ err = -EBUSY;
+ goto err_put_hcd;
+ }
+
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs)
+ goto err_release_region;
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err)
+ goto err_iounmap;
+
+ platform_set_drvdata(dev, hcd);
+
+ return err;
+
+err_iounmap:
+ iounmap(hcd->regs);
+err_release_region:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_put_hcd:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int __devexit ehci_platform_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ platform_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int ehci_platform_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
+
+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd), wakeup);
+ return 0;
+}
+
+static int ehci_platform_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ ehci_prepare_ports_for_controller_resume(hcd_to_ehci(hcd));
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define ehci_platform_suspend NULL
+#define ehci_platform_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct platform_device_id ehci_platform_table[] = {
+ { "ehci-platform", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ehci_platform_table);
+
+static const struct dev_pm_ops ehci_platform_pm_ops = {
+ .suspend = ehci_platform_suspend,
+ .resume = ehci_platform_resume,
+};
+
+static struct platform_driver ehci_platform_driver = {
+ .id_table = ehci_platform_table,
+ .probe = ehci_platform_probe,
+ .remove = __devexit_p(ehci_platform_remove),
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ehci-platform",
+ .pm = &ehci_platform_pm_ops,
+ }
+};
--- /dev/null
+++ b/include/linux/usb/ehci_pdriver.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __USB_CORE_EHCI_PDRIVER_H
+#define __USB_CORE_EHCI_PDRIVER_H
+
+/**
+ * struct usb_ehci_pdata - platform_data for generic ehci driver
+ *
+ * @caps_offset: offset of the EHCI Capability Registers from the start of
+ * the io memory region provided to the driver.
+ * @has_tt: set to 1 if TT is integrated in root hub.
+ * @port_power_on: set to 1 if the controller needs to be powered up after
+ * initialization.
+ * @port_power_off: set to 1 if the controller needs to be powered down
+ * after initialization.
+ *
+ * These are general configuration options for the EHCI controller. All of
+ * these options essentially enable workarounds for quirks in specific hardware.
+ */
+struct usb_ehci_pdata {
+ int caps_offset;
+ unsigned has_tt:1;
+ unsigned has_synopsys_hc_bug:1;
+ unsigned big_endian_desc:1;
+ unsigned big_endian_mmio:1;
+ unsigned port_power_on:1;
+ unsigned port_power_off:1;
+};
+
+#endif /* __USB_CORE_EHCI_PDRIVER_H */
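For context on how the two new files fit together: a board file registers a platform device named "ehci-platform" whose platform_data points at a struct usb_ehci_pdata (the probe above BUG()s without one). A minimal sketch follows; the MMIO base, IRQ number and caps offset are invented for illustration.

/*
 * Editorial sketch, not part of the patch: one plausible way a board
 * file could feed platform data to the generic driver above. The MMIO
 * base, IRQ number and caps_offset are assumptions, not real values.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>

static struct usb_ehci_pdata board_ehci_pdata = {
        .caps_offset   = 0x100, /* assumed: caps regs 0x100 into the region */
        .has_tt        = 1,     /* root hub has an integrated TT */
        .port_power_on = 1,     /* power the ports up after init */
};

static struct resource board_ehci_res[] = {
        DEFINE_RES_MEM(0x18021000, 0x1000), /* hypothetical controller MMIO */
        DEFINE_RES_IRQ(39),                 /* hypothetical IRQ line */
};

static struct platform_device board_ehci_dev = {
        .name          = "ehci-platform", /* must match ehci_platform_table */
        .id            = -1,
        .dev           = {
                .platform_data = &board_ehci_pdata,
        },
        .resource      = board_ehci_res,
        .num_resources = ARRAY_SIZE(board_ehci_res),
};

static int __init board_usb_init(void)
{
        return platform_device_register(&board_ehci_dev);
}
device_initcall(board_usb_init);

The driver's id_table matches on the name string, so the device name must be exactly "ehci-platform" for the probe above to run.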


@ -1,11 +0,0 @@
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -75,8 +75,6 @@ static const struct hc_driver ehci_platf
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
- .update_device = ehci_update_device,
-
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};


@ -1,27 +0,0 @@
From 031d8ad2c3cee85c515e551fc8c0054bdedb7b8b Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Thu, 13 Dec 2012 18:02:11 +0100
Subject: [PATCH] x86: fix perf build with uclibc toolchains
libio.h is not provided by uClibc. In order to be able to test for the
definition of __UCLIBC__, we need to include stdlib.h, which also
includes stddef.h, providing the definition of 'NULL'.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
tools/perf/arch/x86/util/dwarf-regs.c | 3 +++
1 file changed, 3 insertions(+)
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -20,7 +20,10 @@
*
*/
+#include <stdlib.h>
+#ifndef __UCLIBC__
#include <libio.h>
+#endif
#include <dwarf-regs.h>
/*


@ -1,27 +0,0 @@
From 6e601a53566d84e1ffd25e7b6fe0b6894ffd79c0 Mon Sep 17 00:00:00 2001
From: Mathias Krause <minipli@googlemail.com>
Date: Sat, 23 Feb 2013 01:13:47 +0000
Subject: sock_diag: Fix out-of-bounds access to sock_diag_handlers[]
Userland can send a netlink message requesting SOCK_DIAG_BY_FAMILY
with a family greater than or equal to AF_MAX -- the array size of
sock_diag_handlers[]. The current code does not test for this
condition and is therefore vulnerable to an out-of-bounds access,
opening the door to privilege escalation.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -126,6 +126,9 @@ static int __sock_diag_rcv_msg(struct sk
if (nlmsg_len(nlh) < sizeof(*req))
return -EINVAL;
+ if (req->sdiag_family >= AF_MAX)
+ return -EINVAL;
+
hndl = sock_diag_lock_handler(req->sdiag_family);
if (hndl == NULL)
err = -ENOENT;
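The fix itself is a single range check before the untrusted index is used. A minimal user-space model of the pattern, with AF_MAX and the handler table standing in for the kernel's versions:

/* Model of the fix above: any index derived from untrusted input must
 * be range-checked before it selects an array slot. */
#include <stddef.h>

#define AF_MAX 41                       /* assumed table size */

struct handler;                         /* opaque for the sketch */
static const struct handler *handlers[AF_MAX];

static const struct handler *lookup(unsigned int family)
{
        if (family >= AF_MAX)           /* reject before indexing */
                return NULL;
        return handlers[family];
}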

File diff suppressed because it is too large


@ -1,41 +0,0 @@
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -795,7 +795,7 @@ static int ehci_run (struct usb_hcd *hcd
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
temp >> 8, temp & 0xff,
- ignore_oc ? ", overcurrent ignored" : "");
+ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
ehci_writel(ehci, INTR_MASK,
&ehci->regs->intr_enable); /* Turn On Interrupts */
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -578,7 +578,7 @@ ehci_hub_status_data (struct usb_hcd *hc
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
- if (!ignore_oc)
+ if (!ignore_oc && !ehci->ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
@@ -803,7 +803,7 @@ static int ehci_hub_control (
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
- if ((temp & PORT_OCC) && !ignore_oc){
+ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -147,6 +147,7 @@ struct ehci_hcd { /* one per controlle
unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned ignore_oc:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)


@ -1,10 +0,0 @@
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -17,6 +17,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
struct map_info;
struct platform_device;


@ -1,188 +0,0 @@
From 9d02daf754238adac48fa075ee79e7edd3d79ed3 Mon Sep 17 00:00:00 2001
From: David Woodhouse <dwmw2@infradead.org>
Date: Sun, 8 Apr 2012 09:55:43 +0000
Subject: [PATCH] pppoatm: Fix excessive queue bloat
We discovered that PPPoATM has an excessively deep transmit queue. A
queue the size of the default socket send buffer (wmem_default) is
maintained between the PPP generic core and the ATM device.
Fix it to queue a maximum of *two* packets. The one the ATM device is
currently working on, and one more for the ATM driver to process
immediately in its TX done interrupt handler. The PPP core is designed
to feed packets to the channel with minimal latency, so that really
ought to be enough to keep the ATM device busy.
While we're at it, fix the fact that we were triggering the wakeup
tasklet on *every* pppoatm_pop() call. The comment saying "this is
inefficient, but doing it right is too hard" turns out to be overly
pessimistic... I think :)
On machines like the Traverse Geos, with a slow Geode CPU and two
high-speed ADSL2+ interfaces, there were reports of extremely high CPU
usage which could partly be attributed to the extra wakeups.
(The wakeup handling could actually be made a whole lot easier if we
stop checking sk->sk_sndbuf altogether. Given that we now only queue
*two* packets ever, one wonders what the point is. As it is, you could
already deadlock the thing by setting the sk_sndbuf to a value lower
than the MTU of the device, and it'd just block for ever.)
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/atm/pppoatm.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++-----
1 files changed, 85 insertions(+), 10 deletions(-)
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -62,12 +62,25 @@ struct pppoatm_vcc {
void (*old_pop)(struct atm_vcc *, struct sk_buff *);
/* keep old push/pop for detaching */
enum pppoatm_encaps encaps;
+ atomic_t inflight;
+ unsigned long blocked;
int flags; /* SC_COMP_PROT - compress protocol */
struct ppp_channel chan; /* interface to generic ppp layer */
struct tasklet_struct wakeup_tasklet;
};
/*
+ * We want to allow two packets in the queue. The one that's currently in
+ * flight, and *one* queued up ready for the ATM device to send immediately
+ * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
+ * inflight == -2 represents an empty queue, -1 one packet, and zero means
+ * there are two packets in the queue.
+ */
+#define NONE_INFLIGHT -2
+
+#define BLOCKED 0
+
+/*
* Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
* ID (0xC021) used in autodetection
*/
@@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsign
static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
+
pvcc->old_pop(atmvcc, skb);
+ atomic_dec(&pvcc->inflight);
+
/*
- * We don't really always want to do this since it's
- * really inefficient - it would be much better if we could
- * test if we had actually throttled the generic layer.
- * Unfortunately then there would be a nasty SMP race where
- * we could clear that flag just as we refuse another packet.
- * For now we do the safe thing.
+ * We always used to run the wakeup tasklet unconditionally here, for
+ * fear of race conditions where we clear the BLOCKED flag just as we
+ * refuse another packet in pppoatm_send(). This was quite inefficient.
+ *
+ * In fact it's OK. The PPP core will only ever call pppoatm_send()
+ * while holding the channel->downl lock. And ppp_output_wakeup() as
+ * called by the tasklet will *also* grab that lock. So even if another
+ * CPU is in pppoatm_send() right now, the tasklet isn't going to race
+ * with it. The wakeup *will* happen after the other CPU is safely out
+ * of pppoatm_send() again.
+ *
+ * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
+ * it about to return, that's fine. We trigger a wakeup which will
+ * happen later. And if the CPU in pppoatm_send() *hasn't* set the
+ * BLOCKED bit yet, that's fine too because of the double check in
+ * pppoatm_may_send() which is commented there.
*/
- tasklet_schedule(&pvcc->wakeup_tasklet);
+ if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
+ tasklet_schedule(&pvcc->wakeup_tasklet);
}
/*
@@ -184,6 +211,51 @@ error:
ppp_input_error(&pvcc->chan, 0);
}
+static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
+{
+ /*
+ * It's not clear that we need to bother with using atm_may_send()
+ * to check we don't exceed sk->sk_sndbuf. If userspace sets a
+ * value of sk_sndbuf which is lower than the MTU, we're going to
+ * block for ever. But the code always did that before we introduced
+ * the packet count limit, so...
+ */
+ if (atm_may_send(pvcc->atmvcc, size) &&
+ atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+ return 1;
+
+ /*
+ * We use test_and_set_bit() rather than set_bit() here because
+ * we need to ensure there's a memory barrier after it. The bit
+ * *must* be set before we do the atomic_inc() on pvcc->inflight.
+ * There's no smp_mb__after_set_bit(), so it's this or abuse
+ * smp_mb__after_clear_bit().
+ */
+ test_and_set_bit(BLOCKED, &pvcc->blocked);
+
+ /*
+ * We may have raced with pppoatm_pop(). If it ran for the
+ * last packet in the queue, *just* before we set the BLOCKED
+ * bit, then it might never run again and the channel could
+ * remain permanently blocked. Cope with that race by checking
+ * *again*. If it did run in that window, we'll have space on
+ * the queue now and can return success. It's harmless to leave
+ * the BLOCKED flag set, since it's only used as a trigger to
+ * run the wakeup tasklet. Another wakeup will never hurt.
+ * If pppoatm_pop() is running but hasn't got as far as making
+ * space on the queue yet, then it hasn't checked the BLOCKED
+ * flag yet either, so we're safe in that case too. It'll issue
+ * an "immediate" wakeup... where "immediate" actually involves
+ * taking the PPP channel's ->downl lock, which is held by the
+ * code path that calls pppoatm_send(), and is thus going to
+ * wait for us to finish.
+ */
+ if (atm_may_send(pvcc->atmvcc, size) &&
+ atomic_inc_not_zero(&pvcc->inflight))
+ return 1;
+
+ return 0;
+}
/*
* Called by the ppp_generic.c to send a packet - returns true if packet
* was accepted. If we return false, then it's our job to call
@@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_chann
struct sk_buff *n;
n = skb_realloc_headroom(skb, LLC_LEN);
if (n != NULL &&
- !atm_may_send(pvcc->atmvcc, n->truesize)) {
+ !pppoatm_may_send(pvcc, n->truesize)) {
kfree_skb(n);
goto nospace;
}
@@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_chann
skb = n;
if (skb == NULL)
return DROP_PACKET;
- } else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+ } else if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
break;
case e_vc:
- if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+ if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
break;
case e_autodetect:
@@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm
if (pvcc == NULL)
return -ENOMEM;
pvcc->atmvcc = atmvcc;
+
+ /* Maximum is zero, so that we can use atomic_inc_not_zero() */
+ atomic_set(&pvcc->inflight, NONE_INFLIGHT);
pvcc->old_push = atmvcc->push;
pvcc->old_pop = atmvcc->pop;
pvcc->encaps = (enum pppoatm_encaps) be.encaps;
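The inflight counting scheme described in the comments above is compact enough to model outside the kernel. A sketch in C11 atomics, with a CAS loop standing in for atomic_inc_not_zero():

/* User-space model of the two-packet accounting: inflight starts at -2
 * for an empty queue, and inc-not-zero refuses the 0 -> 1 transition,
 * i.e. refuses a third packet. */
#include <stdatomic.h>
#include <stdbool.h>

#define NONE_INFLIGHT -2

static atomic_int inflight = NONE_INFLIGHT;

static bool inc_not_zero(atomic_int *v)  /* atomic_inc_not_zero() */
{
        int old = atomic_load(v);

        while (old != 0)
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        return false;
}

static bool may_send(void)               /* core of pppoatm_may_send() */
{
        return inc_not_zero(&inflight);
}

static void tx_done(void)                /* pppoatm_pop() */
{
        atomic_fetch_sub(&inflight, 1);
}

With this scheme the third caller of may_send() sees inflight == 0 and is refused, which is exactly the two-packet cap the commit message describes.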


@ -1,900 +0,0 @@
commit d7d3d8f1ee4435e32bc6c93187798b7e2e3170a9
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Thu Nov 29 23:28:30 2012 +0000
solos-pci: remove list_vccs() debugging function
No idea why we've gone so long dumping a list of VCCs with vci==0 on
every ->open() call...
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit c93967bfd3a3dffa759a3f28370167bf3cdbc3d0
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Thu Nov 29 23:27:20 2012 +0000
solos-pci: use GFP_KERNEL where possible, not GFP_ATOMIC
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit ad6999e17ae4f7b99f6d28f425ae970acb115347
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Thu Nov 29 23:15:30 2012 +0000
solos-pci: clean up pclose() function
- Flush pending TX skbs from the queue rather than waiting for them all to
complete (suggested by Krzysztof Mazur <krzysiek@podlesie.net>).
- Clear ATM_VF_ADDR only when the PKT_PCLOSE packet has been submitted.
- Don't clear ATM_VF_READY at all — vcc_destroy_socket() does that for us.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit 2547e97f29d6d69d567947d5ef90b6b4fbcaf565
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Wed Nov 28 10:15:05 2012 +0000
pppoatm: optimise PPP channel wakeups after sock_owned_by_user()
We don't need to schedule the wakeup tasklet on *every* unlock; only if we
actually blocked the channel in the first place.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: Krzysztof Mazur <krzysiek@podlesie.net>
commit 990b0884a2e9668c08e9daa4d70a54d65329cb6f
Author: Krzysztof Mazur <krzysiek@podlesie.net>
Date: Wed Nov 28 09:08:04 2012 +0100
br2684: allow assign only on a connected socket
The br2684 does not check if used vcc is in connected state,
causing potential Oops in pppoatm_send() when vcc->send() is called
on not fully connected socket.
Now br2684 can be assigned only on connected sockets; otherwise
-EINVAL error is returned.
Signed-off-by: Krzysztof Mazur <krzysiek@podlesie.net>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit f49b6da01f0abebb17f6241473d53018d923f6b0
Author: Nathan Williams <nathan@traverse.com.au>
Date: Tue Nov 27 17:34:09 2012 +1100
solos-pci: Fix leak of skb received for unknown vcc
... and ensure that the next skb is set up for RX in the DMA case.
Signed-off-by: Nathan Williams <nathan@traverse.com.au>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit a61d37ff4a555886c4ebe31d2c6d893afb6f4d3c
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Wed Nov 28 00:46:45 2012 +0000
br2684: fix module_put() race
The br2684 code used module_put() during unassignment from the vcc in
the hope that we hold the BKL. This assumption is no longer true.
Now the owner field in the atmvcc is used to move this module_put()
to vcc_destroy_socket().
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: Krzysztof Mazur <krzysiek@podlesie.net>
commit c52f40629884ddc62c7af445fd5d620fdb466fb2
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Wed Nov 28 00:05:52 2012 +0000
pppoatm: fix missing wakeup in pppoatm_send()
Now that we can return zero from pppoatm_send() for reasons *other* than
the queue being full, we can't depend on a subsequent call to
pppoatm_pop() waking the queue, and we might leave it stalled
indefinitely.
Use the ->release_cb() callback to wake the queue after the sock is
unlocked.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: Krzysztof Mazur <krzysiek@podlesie.net>
commit 35826e7372fe39b7db7930eda0267c82d68d1a4c
Author: David Woodhouse <dwmw2@infradead.org>
Date: Tue Nov 27 23:28:36 2012 +0000
br2684: don't send frames on not-ready vcc
Avoid submitting packets to a vcc which is being closed. Things go badly
wrong when the ->pop method gets called later, after everything's been
torn down.
Use the ATM socket lock for synchronisation with vcc_destroy_socket(),
which clears the ATM_VF_READY bit under the same lock. Otherwise, we
could end up submitting a packet to the device driver even after its
->ops->close method has been called. And it could call the vcc's ->pop
method after the protocol has been shut down. Which leads to a panic.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: Krzysztof Mazur <krzysiek@podlesie.net>
commit 7f940dde65de4a707f3dd723bb6ce9de90ca1eab
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Wed Nov 28 00:03:11 2012 +0000
atm: add release_cb() callback to vcc
The immediate use case for this is that it will allow us to ensure that a
pppoatm queue is woken after it has to drop a packet due to the sock being
locked.
Note that 'release_cb' is called when the socket is *unlocked*. This is
not to be confused with vcc_release() — which probably ought to be called
vcc_close().
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: Krzysztof Mazur <krzysiek@podlesie.net>
commit def1b2f9083f84d0a77730e537c76429914d17c1
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Tue Nov 27 23:49:24 2012 +0000
solos-pci: wait for pending TX to complete when releasing vcc
We should no longer be calling the old pop routine for the vcc, after
vcc_release() has completed. Make sure we wait for any pending TX skbs
to complete, by waiting for our own PKT_PCLOSE control skb to be sent.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit 397ff16dce53888ec693b3718640be2560204751
Author: Krzysztof Mazur <krzysiek@podlesie.net>
Date: Tue Nov 6 23:17:02 2012 +0100
pppoatm: do not inline pppoatm_may_send()
pppoatm_may_send() is quite heavy and is called three times
in pppoatm_send(); inlining it costs more than 200 bytes of code
(more than 10% of the total pppoatm driver code size).
add/remove: 1/0 grow/shrink: 0/1 up/down: 132/-367 (-235)
function old new delta
pppoatm_may_send - 132 +132
pppoatm_send 900 533 -367
Signed-off-by: Krzysztof Mazur <krzysiek@podlesie.net>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit 071d93931a75dc1f82f0baa9959613af81c5a032
Author: Krzysztof Mazur <krzysiek@podlesie.net>
Date: Sat Nov 10 23:33:19 2012 +0100
pppoatm: drop frames to not-ready vcc
vcc_destroy_socket() closes the vcc before the protocol is detached
from it by calling vcc->push() with a NULL skb. This leaves a time
window where the protocol may call vcc->send() on a closed vcc
and crash.
Now pppoatm_send(), like vcc_sendmsg(), checks for vcc flags that
indicate that the vcc is not ready. If the vcc is not ready, we just
drop the frame. Queueing frames would be much more complicated because
we don't have callbacks that inform us about vcc flag changes.
Signed-off-by: Krzysztof Mazur <krzysiek@podlesie.net>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit 3ac108006fd7f20cb8fc8ea2287f1497bcda00a1
Author: Krzysztof Mazur <krzysiek@podlesie.net>
Date: Tue Nov 6 23:17:00 2012 +0100
pppoatm: take ATM socket lock in pppoatm_send()
pppoatm_send() does not take any lock that would prevent a concurrent
vcc_sendmsg(). This causes two problems:
- there is no locking between checking the send queue size
with atm_may_send() and incrementing sk_wmem_alloc,
and the real queue size can be a little higher than sk_sndbuf
- vcc->sendmsg() can be called concurrently. I'm not sure
if that's allowed. Some drivers (eni, nicstar, ...) seem
to assume it will never happen.
Now pppoatm_send() takes the ATM socket lock, the same one used
in vcc_sendmsg() and other ATM socket functions. pppoatm_send()
is called with BHs disabled, so bh_lock_sock() is used instead
of lock_sock().
Signed-off-by: Krzysztof Mazur <krzysiek@podlesie.net>
Cc: Chas Williams - CONTRACTOR <chas@cmf.nrl.navy.mil>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit e41faed9cde1acce657f75a0b19a1787e9850d3f
Author: Krzysztof Mazur <krzysiek@podlesie.net>
Date: Tue Nov 6 23:16:59 2012 +0100
pppoatm: fix module_put() race
pppoatm used module_put() during unassignment from the vcc in the
hope that we hold the BKL. This assumption is no longer true.
Now the owner field in the atmvcc is used to move this module_put()
to vcc_destroy_socket().
Signed-off-by: Krzysztof Mazur <krzysiek@podlesie.net>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit 3b1a914595f3f9beb9e38ff3ddc7bdafa092ba22
Author: Krzysztof Mazur <krzysiek@podlesie.net>
Date: Tue Nov 6 23:16:58 2012 +0100
pppoatm: allow assign only on a connected socket
pppoatm does not check whether the vcc it uses is in a connected
state, causing an Oops in pppoatm_send() when vcc->send() is called
on a not fully connected socket.
Now pppoatm can be assigned only on connected sockets; otherwise
an -EINVAL error is returned.
Signed-off-by: Krzysztof Mazur <krzysiek@podlesie.net>
Cc: Chas Williams - CONTRACTOR <chas@cmf.nrl.navy.mil>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit ec809bd817dfa1905283468e4c813684ed4efe78
Author: Krzysztof Mazur <krzysiek@podlesie.net>
Date: Tue Nov 6 23:16:57 2012 +0100
atm: add owner of push() callback to atmvcc
The ATM layer uses the atmvcc->push(vcc, NULL) callback to notify the
protocol that the vcc will be closed and the protocol must detach from
it. This callback is usually used by the protocol to decrement the
module usage count with module_put(), but it leaves a small window in
which the module is still used after module_put().
Now the owner of the push() callback is kept in the atmvcc, and
module_put(atmvcc->owner) is called after the protocol is detached
from the vcc.
Signed-off-by: Krzysztof Mazur <krzysiek@podlesie.net>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: Chas Williams <chas@cmf.nrl.navy.mil>
commit ae088d663beebb3cad0e7abaac67ee61a7c578d5
Author: David Woodhouse <dwmw2@infradead.org>
Date: Sun Nov 25 12:06:52 2012 +0000
atm: br2684: Fix excessive queue bloat
There's really no excuse for an additional wmem_default of buffering
between the netdev queue and the ATM device. Two packets (one in-flight,
and one ready to send) ought to be fine. It's not as if it should take
long to get another from the netdev queue when we need it.
If necessary we can make the queue space configurable later, but I don't
think it's likely to be necessary.
cf. commit 9d02daf754238adac48fa075ee79e7edd3d79ed3 (pppoatm: Fix
excessive queue bloat) which did something very similar for PPPoATM.
Note that there is a tremendously unlikely race condition which may
result in qspace temporarily going negative. If a CPU running the
br2684_pop() function goes off into the weeds for a long period of time
after incrementing qspace to 1, but before calling netif_wake_queue()...
and another CPU ends up calling br2684_start_xmit() and *stopping* the
queue again before the first CPU comes back, the netdev queue could
end up being woken when qspace has already reached zero.
An alternative approach to coping with this race would be to check in
br2684_start_xmit() for qspace==0 and return NETDEV_TX_BUSY, but just
using '> 0' and '< 1' for comparison instead of '== 0' and '!= 0' is
simpler. It just warranted a mention of *why* we do it that way...
Move the call to atmvcc->send() to happen *after* the accounting and
potentially stopping the netdev queue, in br2684_xmit_vcc(). This matters
if the ->send() call suffers an immediate failure, because it'll call
br2684_pop() with the offending skb before returning. We want that to
happen *after* we've done the initial accounting for the packet in
question. Also make it return an appropriate success/failure indication
while we're at it.
Tested by running 'ping -l 1000 bottomless.aaisp.net.uk' from within my
network, with only a single PPPoE-over-BR2684 link running. And after
setting txqueuelen on the nas0 interface to something low (5, in fact).
Before the patch, we'd see about 15 packets being queued and a resulting
latency of ~56ms being reached. After the patch, we see only about 8,
which is pretty much what we expect. And a max latency of ~36ms. On this
OpenWRT box, wmem_default is 163840.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Reviewed-by: Krzysztof Mazur <krzysiek@podlesie.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
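The thread running through these commits is the release_cb mechanism: a sender that finds the socket locked sets a BLOCKED bit and backs off, and the unlock path runs a callback that clears the bit and issues the wakeup. A rough user-space model follows (pthread_mutex_trylock() stands in for sock_owned_by_user(), which is only an approximation); the combined diff for all the commits comes after it.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct chan {
        pthread_mutex_t lock;   /* stands in for the ATM socket lock */
        atomic_bool blocked;    /* stands in for the BLOCKED bit */
};

static void chan_release_cb(struct chan *c) /* like vcc_release_cb() */
{
        if (atomic_exchange(&c->blocked, false)) {
                /* ppp_output_wakeup() equivalent would run here */
        }
}

static bool chan_send(struct chan *c)       /* like pppoatm_send() */
{
        if (pthread_mutex_trylock(&c->lock) != 0) {
                /* Set (and flush) the flag before the owner can unlock
                 * and run the callback, so the wakeup is not lost. */
                atomic_store(&c->blocked, true);
                return false;   /* caller waits for the wakeup */
        }
        /* ... hand the packet to the device under the lock ... */
        pthread_mutex_unlock(&c->lock);
        chan_release_cb(c);     /* the kernel runs this on release_sock() */
        return true;
}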
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -92,6 +92,7 @@ struct pkt_hdr {
};
struct solos_skb_cb {
+ struct completion c;
struct atm_vcc *vcc;
uint32_t dma_addr;
};
@@ -164,7 +165,6 @@ static void fpga_queue(struct solos_card
static uint32_t fpga_tx(struct solos_card *);
static irqreturn_t solos_irq(int irq, void *dev_id);
static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
-static int list_vccs(int vci);
static int atm_init(struct solos_card *, struct device *);
static void atm_remove(struct solos_card *);
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
@@ -710,7 +710,8 @@ void solos_bh(unsigned long card_arg)
dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n",
le16_to_cpu(header->vpi), le16_to_cpu(header->vci),
port);
- continue;
+ dev_kfree_skb_any(skb);
+ break;
}
atm_charge(vcc, skb->truesize);
vcc->push(vcc, skb);
@@ -790,44 +791,6 @@ static struct atm_vcc *find_vcc(struct a
return vcc;
}
-static int list_vccs(int vci)
-{
- struct hlist_head *head;
- struct atm_vcc *vcc;
- struct hlist_node *node;
- struct sock *s;
- int num_found = 0;
- int i;
-
- read_lock(&vcc_sklist_lock);
- if (vci != 0){
- head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
- num_found ++;
- vcc = atm_sk(s);
- printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
- vcc->dev->number,
- vcc->vpi,
- vcc->vci);
- }
- } else {
- for(i = 0; i < VCC_HTABLE_SIZE; i++){
- head = &vcc_hash[i];
- sk_for_each(s, node, head) {
- num_found ++;
- vcc = atm_sk(s);
- printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
- vcc->dev->number,
- vcc->vpi,
- vcc->vci);
- }
- }
- }
- read_unlock(&vcc_sklist_lock);
- return num_found;
-}
-
-
static int popen(struct atm_vcc *vcc)
{
struct solos_card *card = vcc->dev->dev_data;
@@ -840,7 +803,7 @@ static int popen(struct atm_vcc *vcc)
return -EINVAL;
}
- skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
@@ -857,8 +820,6 @@ static int popen(struct atm_vcc *vcc)
set_bit(ATM_VF_ADDR, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
- list_vccs(0);
-
return 0;
}
@@ -866,10 +827,21 @@ static int popen(struct atm_vcc *vcc)
static void pclose(struct atm_vcc *vcc)
{
struct solos_card *card = vcc->dev->dev_data;
- struct sk_buff *skb;
+ unsigned char port = SOLOS_CHAN(vcc->dev);
+ struct sk_buff *skb, *tmpskb;
struct pkt_hdr *header;
- skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
+ /* Remove any yet-to-be-transmitted packets from the pending queue */
+ spin_lock(&card->tx_queue_lock);
+ skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
+ if (SKB_CB(skb)->vcc == vcc) {
+ skb_unlink(skb, &card->tx_queue[port]);
+ solos_pop(vcc, skb);
+ }
+ }
+ spin_unlock(&card->tx_queue_lock);
+
+ skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n");
return;
@@ -881,15 +853,21 @@ static void pclose(struct atm_vcc *vcc)
header->vci = cpu_to_le16(vcc->vci);
header->type = cpu_to_le16(PKT_PCLOSE);
- fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL);
+ init_completion(&SKB_CB(skb)->c);
- clear_bit(ATM_VF_ADDR, &vcc->flags);
- clear_bit(ATM_VF_READY, &vcc->flags);
+ fpga_queue(card, port, skb, NULL);
+
+ if (!wait_for_completion_timeout(&SKB_CB(skb)->c, 5 * HZ))
+ dev_warn(&card->dev->dev, "Timeout waiting for VCC close on port %d\n",
+ port);
/* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the
tasklet has finished processing any incoming packets (and, more to
the point, using the vcc pointer). */
tasklet_unlock_wait(&card->tlet);
+
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+
return;
}
@@ -1010,9 +988,12 @@ static uint32_t fpga_tx(struct solos_car
if (vcc) {
atomic_inc(&vcc->stats->tx);
solos_pop(vcc, oldskb);
- } else
+ } else {
+ struct pkt_hdr *header = (void *)oldskb->data;
+ if (le16_to_cpu(header->type) == PKT_PCLOSE)
+ complete(&SKB_CB(oldskb)->c);
dev_kfree_skb_irq(oldskb);
-
+ }
}
}
/* For non-DMA TX, write the 'TX start' bit for all four ports simultaneously */
@@ -1246,7 +1227,7 @@ static int atm_init(struct solos_card *c
card->atmdev[i]->phy_data = (void *)(unsigned long)i;
atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND);
- skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in atm_init()\n");
continue;
@@ -1343,6 +1324,8 @@ static struct pci_driver fpga_driver = {
static int __init solos_pci_init(void)
{
+ BUILD_BUG_ON(sizeof(struct solos_skb_cb) > sizeof(((struct sk_buff *)0)->cb));
+
printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION);
return pci_register_driver(&fpga_driver);
}
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -307,6 +307,7 @@ struct atm_vcc {
struct atm_dev *dev; /* device back pointer */
struct atm_qos qos; /* QOS */
struct atm_sap sap; /* SAP */
+ void (*release_cb)(struct atm_vcc *vcc); /* release_sock callback */
void (*push)(struct atm_vcc *vcc,struct sk_buff *skb);
void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */
int (*push_oam)(struct atm_vcc *vcc,void *cell);
@@ -314,6 +315,7 @@ struct atm_vcc {
void *dev_data; /* per-device data */
void *proto_data; /* per-protocol data */
struct k_atm_aal_stats *stats; /* pointer to AAL stats group */
+ struct module *owner; /* owner of ->push function */
/* SVC part --- may move later ------------------------------------- */
short itf; /* interface number */
struct sockaddr_atmsvc local;
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -68,12 +68,15 @@ struct br2684_vcc {
/* keep old push, pop functions for chaining */
void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
+ void (*old_release_cb)(struct atm_vcc *vcc);
+ struct module *old_owner;
enum br2684_encaps encaps;
struct list_head brvccs;
#ifdef CONFIG_ATM_BR2684_IPFILTER
struct br2684_filter filter;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
unsigned copies_needed, copies_failed;
+ atomic_t qspace;
};
struct br2684_dev {
@@ -181,18 +184,15 @@ static struct notifier_block atm_dev_not
static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct br2684_vcc *brvcc = BR2684_VCC(vcc);
- struct net_device *net_dev = skb->dev;
- pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
+ pr_debug("(vcc %p ; net_dev %p )\n", vcc, brvcc->device);
brvcc->old_pop(vcc, skb);
- if (!net_dev)
- return;
-
- if (atm_may_send(vcc, 0))
- netif_wake_queue(net_dev);
-
+ /* If the queue space just went up from zero, wake */
+ if (atomic_inc_return(&brvcc->qspace) == 1)
+ netif_wake_queue(brvcc->device);
}
+
/*
* Send a packet out a particular vcc. Not too useful right now, but paves
* the way for multiple vcc's per itf. Returns true if we can send,
@@ -256,16 +256,30 @@ static int br2684_xmit_vcc(struct sk_buf
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- atmvcc->send(atmvcc, skb);
- if (!atm_may_send(atmvcc, 0)) {
+ if (atomic_dec_return(&brvcc->qspace) < 1) {
+ /* No more please! */
netif_stop_queue(brvcc->device);
- /*check for race with br2684_pop*/
- if (atm_may_send(atmvcc, 0))
- netif_start_queue(brvcc->device);
+ /* We might have raced with br2684_pop() */
+ if (unlikely(atomic_read(&brvcc->qspace) > 0))
+ netif_wake_queue(brvcc->device);
}
- return 1;
+ /* If this fails immediately, the skb will be freed and br2684_pop()
+ will wake the queue if appropriate. Just return an error so that
+ the stats are updated correctly */
+ return !atmvcc->send(atmvcc, skb);
+}
+
+static void br2684_release_cb(struct atm_vcc *atmvcc)
+{
+ struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
+
+ if (atomic_read(&brvcc->qspace) > 0)
+ netif_wake_queue(brvcc->device);
+
+ if (brvcc->old_release_cb)
+ brvcc->old_release_cb(atmvcc);
}
static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
@@ -279,6 +293,8 @@ static netdev_tx_t br2684_start_xmit(str
{
struct br2684_dev *brdev = BRPRIV(dev);
struct br2684_vcc *brvcc;
+ struct atm_vcc *atmvcc;
+ netdev_tx_t ret = NETDEV_TX_OK;
pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
read_lock(&devs_lock);
@@ -289,9 +305,26 @@ static netdev_tx_t br2684_start_xmit(str
dev->stats.tx_carrier_errors++;
/* netif_stop_queue(dev); */
dev_kfree_skb(skb);
- read_unlock(&devs_lock);
- return NETDEV_TX_OK;
+ goto out_devs;
}
+ atmvcc = brvcc->atmvcc;
+
+ bh_lock_sock(sk_atm(atmvcc));
+
+ if (test_bit(ATM_VF_RELEASED, &atmvcc->flags) ||
+ test_bit(ATM_VF_CLOSE, &atmvcc->flags) ||
+ !test_bit(ATM_VF_READY, &atmvcc->flags)) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ if (sock_owned_by_user(sk_atm(atmvcc))) {
+ netif_stop_queue(brvcc->device);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
if (!br2684_xmit_vcc(skb, dev, brvcc)) {
/*
* We should probably use netif_*_queue() here, but that
@@ -303,8 +336,11 @@ static netdev_tx_t br2684_start_xmit(str
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
}
+ out:
+ bh_unlock_sock(sk_atm(atmvcc));
+ out_devs:
read_unlock(&devs_lock);
- return NETDEV_TX_OK;
+ return ret;
}
/*
@@ -377,9 +413,10 @@ static void br2684_close_vcc(struct br26
list_del(&brvcc->brvccs);
write_unlock_irq(&devs_lock);
brvcc->atmvcc->user_back = NULL; /* what about vcc->recvq ??? */
+ brvcc->atmvcc->release_cb = brvcc->old_release_cb;
brvcc->old_push(brvcc->atmvcc, NULL); /* pass on the bad news */
+ module_put(brvcc->old_owner);
kfree(brvcc);
- module_put(THIS_MODULE);
}
/* when AAL5 PDU comes in: */
@@ -504,6 +541,13 @@ static int br2684_regvcc(struct atm_vcc
brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
if (!brvcc)
return -ENOMEM;
+ /*
+ * Allow two packets in the ATM queue. One actually being sent, and one
+ * for the ATM 'TX done' handler to send. It shouldn't take long to get
+ * the next one from the netdev queue, when we need it. More than that
+ * would be bufferbloat.
+ */
+ atomic_set(&brvcc->qspace, 2);
write_lock_irq(&devs_lock);
net_dev = br2684_find_dev(&be.ifspec);
if (net_dev == NULL) {
@@ -546,9 +590,13 @@ static int br2684_regvcc(struct atm_vcc
brvcc->encaps = (enum br2684_encaps)be.encaps;
brvcc->old_push = atmvcc->push;
brvcc->old_pop = atmvcc->pop;
+ brvcc->old_release_cb = atmvcc->release_cb;
+ brvcc->old_owner = atmvcc->owner;
barrier();
atmvcc->push = br2684_push;
atmvcc->pop = br2684_pop;
+ atmvcc->release_cb = br2684_release_cb;
+ atmvcc->owner = THIS_MODULE;
/* initialize netdev carrier state */
if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
@@ -687,10 +735,13 @@ static int br2684_ioctl(struct socket *s
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (cmd == ATM_SETBACKEND)
+ if (cmd == ATM_SETBACKEND) {
+ if (sock->state != SS_CONNECTED)
+ return -EINVAL;
return br2684_regvcc(atmvcc, argp);
- else
+ } else {
return br2684_create(argp);
+ }
#ifdef CONFIG_ATM_BR2684_IPFILTER
case BR2684_SETFILT:
if (atmvcc->push != br2684_push)
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -126,10 +126,19 @@ static void vcc_write_space(struct sock
rcu_read_unlock();
}
+static void vcc_release_cb(struct sock *sk)
+{
+ struct atm_vcc *vcc = atm_sk(sk);
+
+ if (vcc->release_cb)
+ vcc->release_cb(vcc);
+}
+
static struct proto vcc_proto = {
.name = "VCC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct atm_vcc),
+ .release_cb = vcc_release_cb,
};
int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
@@ -156,7 +165,9 @@ int vcc_create(struct net *net, struct s
atomic_set(&sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
+ vcc->owner = NULL;
vcc->push_oam = NULL;
+ vcc->release_cb = NULL;
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
sk->sk_destruct = vcc_sock_destruct;
@@ -175,6 +186,7 @@ static void vcc_destroy_socket(struct so
vcc->dev->ops->close(vcc);
if (vcc->push)
vcc->push(vcc, NULL); /* atmarpd has no push */
+ module_put(vcc->owner);
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -60,6 +60,8 @@ struct pppoatm_vcc {
struct atm_vcc *atmvcc; /* VCC descriptor */
void (*old_push)(struct atm_vcc *, struct sk_buff *);
void (*old_pop)(struct atm_vcc *, struct sk_buff *);
+ void (*old_release_cb)(struct atm_vcc *);
+ struct module *old_owner;
/* keep old push/pop for detaching */
enum pppoatm_encaps encaps;
atomic_t inflight;
@@ -107,6 +109,24 @@ static void pppoatm_wakeup_sender(unsign
ppp_output_wakeup((struct ppp_channel *) arg);
}
+static void pppoatm_release_cb(struct atm_vcc *atmvcc)
+{
+ struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
+
+ /*
+ * As in pppoatm_pop(), it's safe to clear the BLOCKED bit here because
+ * the wakeup *can't* race with pppoatm_send(). They both hold the PPP
+ * channel's ->downl lock. And the potential race with *setting* it,
+ * which leads to the double-check dance in pppoatm_may_send(), doesn't
+ * exist here. In the sock_owned_by_user() case in pppoatm_send(), we
+ * set the BLOCKED bit while the socket is still locked. We know that
+ * ->release_cb() can't be called until that's done.
+ */
+ if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
+ tasklet_schedule(&pvcc->wakeup_tasklet);
+ if (pvcc->old_release_cb)
+ pvcc->old_release_cb(atmvcc);
+}
/*
* This gets called every time the ATM card has finished sending our
* skb. The ->old_pop will take care of normal atm flow control,
@@ -151,12 +171,11 @@ static void pppoatm_unassign_vcc(struct
pvcc = atmvcc_to_pvcc(atmvcc);
atmvcc->push = pvcc->old_push;
atmvcc->pop = pvcc->old_pop;
+ atmvcc->release_cb = pvcc->old_release_cb;
tasklet_kill(&pvcc->wakeup_tasklet);
ppp_unregister_channel(&pvcc->chan);
atmvcc->user_back = NULL;
kfree(pvcc);
- /* Gee, I hope we have the big kernel lock here... */
- module_put(THIS_MODULE);
}
/* Called when an AAL5 PDU comes in */
@@ -165,9 +184,13 @@ static void pppoatm_push(struct atm_vcc
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pr_debug("\n");
if (skb == NULL) { /* VCC was closed */
+ struct module *module;
+
pr_debug("removing ATMPPP VCC %p\n", pvcc);
+ module = pvcc->old_owner;
pppoatm_unassign_vcc(atmvcc);
atmvcc->push(atmvcc, NULL); /* Pass along bad news */
+ module_put(module);
return;
}
atm_return(atmvcc, skb->truesize);
@@ -211,7 +234,7 @@ error:
ppp_input_error(&pvcc->chan, 0);
}
-static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
+static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
{
/*
* It's not clear that we need to bother with using atm_may_send()
@@ -269,10 +292,33 @@ static inline int pppoatm_may_send(struc
static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
+ struct atm_vcc *vcc;
+ int ret;
+
ATM_SKB(skb)->vcc = pvcc->atmvcc;
pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
(void) skb_pull(skb, 1);
+
+ vcc = ATM_SKB(skb)->vcc;
+ bh_lock_sock(sk_atm(vcc));
+ if (sock_owned_by_user(sk_atm(vcc))) {
+ /*
+ * Needs to happen (and be flushed, hence test_and_) before we unlock
+ * the socket. It needs to be seen by the time our ->release_cb gets
+ * called.
+ */
+ test_and_set_bit(BLOCKED, &pvcc->blocked);
+ goto nospace;
+ }
+ if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
+ test_bit(ATM_VF_CLOSE, &vcc->flags) ||
+ !test_bit(ATM_VF_READY, &vcc->flags)) {
+ bh_unlock_sock(sk_atm(vcc));
+ kfree_skb(skb);
+ return DROP_PACKET;
+ }
+
switch (pvcc->encaps) { /* LLC encapsulation needed */
case e_llc:
if (skb_headroom(skb) < LLC_LEN) {
@@ -285,8 +331,10 @@ static int pppoatm_send(struct ppp_chann
}
kfree_skb(skb);
skb = n;
- if (skb == NULL)
+ if (skb == NULL) {
+ bh_unlock_sock(sk_atm(vcc));
return DROP_PACKET;
+ }
} else if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
@@ -296,6 +344,7 @@ static int pppoatm_send(struct ppp_chann
goto nospace;
break;
case e_autodetect:
+ bh_unlock_sock(sk_atm(vcc));
pr_debug("Trying to send without setting encaps!\n");
kfree_skb(skb);
return 1;
@@ -305,9 +354,12 @@ static int pppoatm_send(struct ppp_chann
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
- return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
+ ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
? DROP_PACKET : 1;
+ bh_unlock_sock(sk_atm(vcc));
+ return ret;
nospace:
+ bh_unlock_sock(sk_atm(vcc));
/*
* We don't have space to send this SKB now, but we might have
* already applied SC_COMP_PROT compression, so may need to undo
@@ -362,6 +414,8 @@ static int pppoatm_assign_vcc(struct atm
atomic_set(&pvcc->inflight, NONE_INFLIGHT);
pvcc->old_push = atmvcc->push;
pvcc->old_pop = atmvcc->pop;
+ pvcc->old_owner = atmvcc->owner;
+ pvcc->old_release_cb = atmvcc->release_cb;
pvcc->encaps = (enum pppoatm_encaps) be.encaps;
pvcc->chan.private = pvcc;
pvcc->chan.ops = &pppoatm_ops;
@@ -377,7 +431,9 @@ static int pppoatm_assign_vcc(struct atm
atmvcc->user_back = pvcc;
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
+ atmvcc->release_cb = pppoatm_release_cb;
__module_get(THIS_MODULE);
+ atmvcc->owner = THIS_MODULE;
/* re-process everything received between connection setup and
backend setup */
@@ -406,6 +462,8 @@ static int pppoatm_ioctl(struct socket *
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (sock->state != SS_CONNECTED)
+ return -EINVAL;
return pppoatm_assign_vcc(atmvcc, argp);
}
case PPPIOCGCHAN:
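The qspace accounting from the br2684 bufferbloat commit can be modelled in isolation as well. A sketch in C11 atomics; atomic_fetch_sub()/atomic_fetch_add() return the old value, so the explicit +/- 1 mirrors the kernel's atomic_dec_return()/atomic_inc_return():

/* Model of the qspace accounting: start at 2 (one packet in flight,
 * one queued), stop the netdev queue when a send takes the last slot,
 * wake it on the 0 -> 1 transition. */
#include <stdatomic.h>

static atomic_int qspace = 2;

static void stop_queue(void) { /* netif_stop_queue() stand-in */ }
static void wake_queue(void) { /* netif_wake_queue() stand-in */ }

static void xmit_one(void)              /* br2684_xmit_vcc() */
{
        if (atomic_fetch_sub(&qspace, 1) - 1 < 1) {
                stop_queue();
                /* We might have raced with tx_done(); recheck. */
                if (atomic_load(&qspace) > 0)
                        wake_queue();
        }
}

static void tx_done(void)               /* br2684_pop() */
{
        if (atomic_fetch_add(&qspace, 1) + 1 == 1)
                wake_queue();
}

The recheck after stop_queue() covers the race the commit message describes, at the cost of qspace briefly going negative.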


@ -1,442 +0,0 @@
commit b4bd8ad9bb311e8536f726f7a633620ccd358cde
Author: David Woodhouse <dwmw2@infradead.org>
Date: Thu May 24 04:58:27 2012 +0000
solos-pci: Fix DMA support
DMA support has finally made its way to the top of the TODO list, having
realised that a Geode using MMIO can't keep up with two ADSL2+ lines
each running at 21Mb/s.
This patch fixes a couple of bugs in the DMA support in the driver, so
once the corresponding FPGA update is complete and tested everything
should work properly.
We weren't storing the currently-transmitting skb, so we were never
unmapping it and never freeing/popping it when the TX was done.
And the addition of pci_set_master() is fairly self-explanatory.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Cc: stable@kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 152a2a8b5e1d4cbe91a7c66f1028db15164a3766
Author: David Woodhouse <David.Woodhouse@intel.com>
Date: Wed Dec 19 11:01:21 2012 +0000
solos-pci: ensure all TX packets are aligned to 4 bytes
The FPGA can't handle unaligned DMA (yet). So copy into an aligned
buffer if skb->data isn't suitably aligned.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 13af816469db3449c072afbae6c4c1bd9ccecccb
Author: Nathan Williams <nathan@traverse.com.au>
Date: Wed Dec 19 11:01:20 2012 +0000
solos-pci: add firmware upgrade support for new models
Signed-off-by: Nathan Williams <nathan@traverse.com.au>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7fbdadb5e951e4f0c0fc991ff5f50295568786e6
Author: Nathan Williams <nathan@traverse.com.au>
Date: Wed Dec 19 11:01:19 2012 +0000
solos-pci: remove superfluous debug output
Signed-off-by: Nathan Williams <nathan@traverse.com.au>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit f9baad02e7411d9f38d5ebe1a1cdcde4ceec100d
Author: Nathan Williams <nathan@traverse.com.au>
Date: Wed Dec 19 11:01:18 2012 +0000
solos-pci: add GPIO support for newer versions on Geos board
dwmw2: Tidy up a little, simpler matching on which GPIO is being accessed,
only register on newer boards, register under PCI device instead of
duplicating them under each ATM device.
Signed-off-by: Nathan Williams <nathan@traverse.com.au>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit cae49ede00ec3d0cda290b03fee55b72b49efc11
Author: David Woodhouse <dwmw2@infradead.org>
Date: Tue Dec 11 14:57:14 2012 +0000
solos-pci: fix double-free of TX skb in DMA mode
We weren't clearing card->tx_skb[port] when processing the TX done interrupt.
If there wasn't another skb ready to transmit immediately, this led to a
double-free because we'd free it *again* next time we did have a packet to
send.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Cc: stable@kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
==
There is a typo here so we do a double lock instead of an unlock.
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
---
Only needed in linux-next. Introduced in f9baad02e7411d9 [14/17]
solos-pci: add GPIO support for newer versions on Geos board
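The GPIO support added below uses a standard sysfs pattern: embed struct device_attribute in a wrapper struct carrying extra state, then recover the wrapper with container_of() in the show/store methods. A sketch of just that pattern; read_gpio_reg() is a hypothetical helper standing in for the driver's ioread32() of GPIO_STATUS:

#include <linux/device.h>
#include <linux/kernel.h>

struct bit_attr {
        struct device_attribute attr;
        int offset;                     /* which bit this file reports */
};

static u32 read_gpio_reg(struct device *dev)
{
        return 0;       /* hypothetical: real code reads GPIO_STATUS */
}

static ssize_t bit_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct bit_attr *ba = container_of(attr, struct bit_attr, attr);

        return sprintf(buf, "%d\n", (read_gpio_reg(dev) >> ba->offset) & 1);
}

#define BIT_ATTR(_name, _offset)                                \
        struct bit_attr bit_attr_##_name = {                    \
                .attr   = __ATTR(_name, 0444, bit_show, NULL),  \
                .offset = _offset }

static BIT_ATTR(PushButton, 14);        /* sysfs file reading bit 14 */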
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -42,7 +42,8 @@
#include <linux/swab.h>
#include <linux/slab.h>
-#define VERSION "0.07"
+#define VERSION "1.04"
+#define DRIVER_VERSION 0x01
#define PTAG "solos-pci"
#define CONFIG_RAM_SIZE 128
@@ -56,16 +57,21 @@
#define FLASH_BUSY 0x60
#define FPGA_MODE 0x5C
#define FLASH_MODE 0x58
+#define GPIO_STATUS 0x54
+#define DRIVER_VER 0x50
#define TX_DMA_ADDR(port) (0x40 + (4 * (port)))
#define RX_DMA_ADDR(port) (0x30 + (4 * (port)))
#define DATA_RAM_SIZE 32768
#define BUF_SIZE 2048
#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/
-#define FPGA_PAGE 528 /* FPGA flash page size*/
-#define SOLOS_PAGE 512 /* Solos flash page size*/
-#define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/
-#define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/
+/* Old boards use ATMEL AD45DB161D flash */
+#define ATMEL_FPGA_PAGE 528 /* FPGA flash page size*/
+#define ATMEL_SOLOS_PAGE 512 /* Solos flash page size*/
+#define ATMEL_FPGA_BLOCK (ATMEL_FPGA_PAGE * 8) /* FPGA block size*/
+#define ATMEL_SOLOS_BLOCK (ATMEL_SOLOS_PAGE * 8) /* Solos block size*/
+/* Current boards use M25P/M25PE SPI flash */
+#define SPI_FLASH_BLOCK (256 * 64)
#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
@@ -123,11 +129,14 @@ struct solos_card {
struct sk_buff_head cli_queue[4];
struct sk_buff *tx_skb[4];
struct sk_buff *rx_skb[4];
+ unsigned char *dma_bounce;
wait_queue_head_t param_wq;
wait_queue_head_t fw_wq;
int using_dma;
+ int dma_alignment;
int fpga_version;
int buffer_size;
+ int atmel_flash;
};
@@ -452,7 +461,6 @@ static ssize_t console_show(struct devic
len = skb->len;
memcpy(buf, skb->data, len);
- dev_dbg(&card->dev->dev, "len: %d\n", len);
kfree_skb(skb);
return len;
@@ -499,6 +507,78 @@ static ssize_t console_store(struct devi
return err?:count;
}
+struct geos_gpio_attr {
+ struct device_attribute attr;
+ int offset;
+};
+
+#define SOLOS_GPIO_ATTR(_name, _mode, _show, _store, _offset) \
+ struct geos_gpio_attr gpio_attr_##_name = { \
+ .attr = __ATTR(_name, _mode, _show, _store), \
+ .offset = _offset }
+
+static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ if (count != 1 && (count != 2 || buf[1] != '\n'))
+ return -EINVAL;
+
+ spin_lock_irq(&card->param_queue_lock);
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ if (buf[0] == '1') {
+ data32 |= 1 << gattr->offset;
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else if (buf[0] == '0') {
+ data32 &= ~(1 << gattr->offset);
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else {
+ count = -EINVAL;
+ }
+ spin_unlock_irq(&card->param_queue_lock);
+ return count;
+}
+
+static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ data32 = (data32 >> gattr->offset) & 1;
+
+ return sprintf(buf, "%d\n", data32);
+}
+
+static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ switch (gattr->offset) {
+ case 0:
+ /* HardwareVersion */
+ data32 = data32 & 0x1F;
+ break;
+ case 1:
+ /* HardwareVariant */
+ data32 = (data32 >> 5) & 0x0F;
+ break;
+ }
+ return sprintf(buf, "%d\n", data32);
+}
+
static DEVICE_ATTR(console, 0644, console_show, console_store);
@@ -507,6 +587,14 @@ static DEVICE_ATTR(console, 0644, consol
#include "solos-attrlist.c"
+static SOLOS_GPIO_ATTR(GPIO1, 0644, geos_gpio_show, geos_gpio_store, 9);
+static SOLOS_GPIO_ATTR(GPIO2, 0644, geos_gpio_show, geos_gpio_store, 10);
+static SOLOS_GPIO_ATTR(GPIO3, 0644, geos_gpio_show, geos_gpio_store, 11);
+static SOLOS_GPIO_ATTR(GPIO4, 0644, geos_gpio_show, geos_gpio_store, 12);
+static SOLOS_GPIO_ATTR(GPIO5, 0644, geos_gpio_show, geos_gpio_store, 13);
+static SOLOS_GPIO_ATTR(PushButton, 0444, geos_gpio_show, NULL, 14);
+static SOLOS_GPIO_ATTR(HardwareVersion, 0444, hardware_show, NULL, 0);
+static SOLOS_GPIO_ATTR(HardwareVariant, 0444, hardware_show, NULL, 1);
#undef SOLOS_ATTR_RO
#undef SOLOS_ATTR_RW
@@ -523,6 +611,23 @@ static struct attribute_group solos_attr
.name = "parameters",
};
+static struct attribute *gpio_attrs[] = {
+ &gpio_attr_GPIO1.attr.attr,
+ &gpio_attr_GPIO2.attr.attr,
+ &gpio_attr_GPIO3.attr.attr,
+ &gpio_attr_GPIO4.attr.attr,
+ &gpio_attr_GPIO5.attr.attr,
+ &gpio_attr_PushButton.attr.attr,
+ &gpio_attr_HardwareVersion.attr.attr,
+ &gpio_attr_HardwareVariant.attr.attr,
+ NULL
+};
+
+static struct attribute_group gpio_attr_group = {
+ .attrs = gpio_attrs,
+ .name = "gpio",
+};
+
static int flash_upgrade(struct solos_card *card, int chip)
{
const struct firmware *fw;
@@ -534,16 +639,25 @@ static int flash_upgrade(struct solos_ca
switch (chip) {
case 0:
fw_name = "solos-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 1:
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 2:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-db-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -553,7 +667,10 @@ static int flash_upgrade(struct solos_ca
case 3:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -569,6 +686,9 @@ static int flash_upgrade(struct solos_ca
dev_info(&card->dev->dev, "Flash upgrade starting\n");
+ /* New FPGAs require driver version before permitting flash upgrades */
+ iowrite32(DRIVER_VERSION, card->config_regs + DRIVER_VER);
+
numblocks = fw->size / blocksize;
dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size);
dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks);
@@ -598,9 +718,13 @@ static int flash_upgrade(struct solos_ca
/* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */
iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE);
- /* Copy block to buffer, swapping each 16 bits */
+ /* Copy block to buffer, swapping each 16 bits for Atmel flash */
for(i = 0; i < blocksize; i += 4) {
- uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i));
+ uint32_t word;
+ if (card->atmel_flash)
+ word = swahb32p((uint32_t *)(fw->data + offset + i));
+ else
+ word = *(uint32_t *)(fw->data + offset + i);
if(card->fpga_version > LEGACY_BUFFERS)
iowrite32(word, FLASH_BUF + i);
else
@@ -945,10 +1069,11 @@ static uint32_t fpga_tx(struct solos_car
for (port = 0; tx_pending; tx_pending >>= 1, port++) {
if (tx_pending & 1) {
struct sk_buff *oldskb = card->tx_skb[port];
- if (oldskb)
+ if (oldskb) {
pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
oldskb->len, PCI_DMA_TODEVICE);
-
+ card->tx_skb[port] = NULL;
+ }
spin_lock(&card->tx_queue_lock);
skb = skb_dequeue(&card->tx_queue[port]);
if (!skb)
@@ -960,8 +1085,14 @@ static uint32_t fpga_tx(struct solos_car
tx_started |= 1 << port;
oldskb = skb; /* We're done with this skb already */
} else if (skb && card->using_dma) {
- SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
+ unsigned char *data = skb->data;
+ if ((unsigned long)data & card->dma_alignment) {
+ data = card->dma_bounce + (BUF_SIZE * port);
+ memcpy(data, skb->data, skb->len);
+ }
+ SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
skb->len, PCI_DMA_TODEVICE);
+ card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
}
@@ -1133,17 +1264,33 @@ static int fpga_probe(struct pci_dev *de
db_fpga_upgrade = db_firmware_upgrade = 0;
}
- if (card->fpga_version >= DMA_SUPPORTED){
+ /* Stopped using Atmel flash after 0.03-38 */
+ if (fpga_ver < 39)
+ card->atmel_flash = 1;
+ else
+ card->atmel_flash = 0;
+
+ data32 = ioread32(card->config_regs + PORTS);
+ card->nr_ports = (data32 & 0x000000FF);
+
+ if (card->fpga_version >= DMA_SUPPORTED) {
+ pci_set_master(dev);
card->using_dma = 1;
+ if (1) { /* All known FPGA versions so far */
+ card->dma_alignment = 3;
+ card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL);
+ if (!card->dma_bounce) {
+ dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n");
+ /* Fallback to MMIO doesn't work */
+ goto out_unmap_both;
+ }
+ }
} else {
card->using_dma = 0;
/* Set RX empty flag for all ports */
iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
}
- data32 = ioread32(card->config_regs + PORTS);
- card->nr_ports = (data32 & 0x000000FF);
-
pci_set_drvdata(dev, card);
tasklet_init(&card->tlet, solos_bh, (unsigned long)card);
@@ -1178,6 +1325,10 @@ static int fpga_probe(struct pci_dev *de
if (err)
goto out_free_irq;
+ if (card->fpga_version >= DMA_SUPPORTED &&
+ sysfs_create_group(&card->dev->dev.kobj, &gpio_attr_group))
+ dev_err(&card->dev->dev, "Could not register parameter group for GPIOs\n");
+
return 0;
out_free_irq:
@@ -1186,6 +1337,7 @@ static int fpga_probe(struct pci_dev *de
tasklet_kill(&card->tlet);
out_unmap_both:
+ kfree(card->dma_bounce);
pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
@@ -1288,11 +1440,16 @@ static void fpga_remove(struct pci_dev *
iowrite32(1, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
+ if (card->fpga_version >= DMA_SUPPORTED)
+ sysfs_remove_group(&card->dev->dev.kobj, &gpio_attr_group);
+
atm_remove(card);
free_irq(dev->irq, card);
tasklet_kill(&card->tlet);
+ kfree(card->dma_bounce);
+
/* Release device from reset */
iowrite32(0, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
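
The tx path in the fpga_tx hunk above only bounces a packet when its address trips the alignment mask; dma_alignment is 3, i.e. the FPGA wants 4-byte-aligned DMA buffers. A minimal userspace sketch of that bounce decision, with BUF_SIZE and the 4-byte requirement assumed rather than taken from the driver:

    #include <stdint.h>
    #include <string.h>

    #define BUF_SIZE 2048          /* assumed size of one per-port bounce slot */
    #define DMA_ALIGN_MASK 0x3     /* dma_alignment = 3: 4-byte alignment */

    /* Return a DMA-safe source pointer: the caller's buffer if it is
     * already aligned, otherwise a copy placed in that port's bounce slot. */
    static void *dma_safe_src(void *data, size_t len,
                              uint8_t *bounce, unsigned port)
    {
            if (((uintptr_t)data & DMA_ALIGN_MASK) == 0)
                    return data;
            memcpy(bounce + (size_t)port * BUF_SIZE, data, len);
            return bounce + (size_t)port * BUF_SIZE;
    }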

View file

@@ -1,39 +0,0 @@
From 32e857cd1fbb006f56a99a2eab998173b1576533 Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Mon, 10 Sep 2012 10:18:57 +0200
Subject: [PATCH net] ixp4xx_hss: fix build failure after logging conversion
Commit c75bb2c6f0cf455c23e60f14d780e841dd47f801 (ixp4xx_hss: Update to
current logging forms) converted the ixp4xx_hss module to use the current
logging macros, but forgot to include linux/module.h, leading to the
following build failures:
CC [M] drivers/net/wan/ixp4xx_hss.o
drivers/net/wan/ixp4xx_hss.c:1412:20: error: expected ';', ',' or ')'
before string constant
drivers/net/wan/ixp4xx_hss.c:1413:25: error: expected ';', ',' or ')'
before string constant
drivers/net/wan/ixp4xx_hss.c:1414:21: error: expected ';', ',' or ')'
before string constant
drivers/net/wan/ixp4xx_hss.c:1415:19: error: expected ';', ',' or ')'
before string constant
make[8]: *** [drivers/net/wan/ixp4xx_hss.o] Error 1
CC: stable@vger.kernel.org
Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
[stable: 3.1+]
drivers/net/wan/ixp4xx_hss.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
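
The build failure quoted above is a generic one: MODULE_AUTHOR(), MODULE_DESCRIPTION() and the other MODULE_* macros are defined in linux/module.h, so any module that only received the header transitively breaks the moment an include chain changes. A minimal sketch reproducing the dependency (hello_init/hello_exit are illustrative names, not from the patch):

    #include <linux/module.h>   /* MODULE_* macros, module_init()/module_exit() */
    #include <linux/kernel.h>

    static int __init hello_init(void)
    {
            pr_info("loaded\n");
            return 0;
    }

    static void __exit hello_exit(void)
    {
            pr_info("unloaded\n");
    }

    module_init(hello_init);
    module_exit(hello_exit);

    /* Without <linux/module.h> these expand to nothing valid and GCC
     * reports "expected ';', ',' or ')' before string constant". */
    MODULE_DESCRIPTION("minimal example");
    MODULE_LICENSE("GPL");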

View file

@@ -1,11 +0,0 @@
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -168,7 +168,7 @@ else
# annotated or signed tagged state (as git describe only
# looks at signed or annotated tags - git tag -a/-s) and
# LOCALVERSION= is not specified
- if test "${LOCALVERSION+set}" != "set"; then
+ if test "${CONFIG_LOCALVERSION+set}" != "set"; then
scm=$(scm_version --short)
res="$res${scm:++}"
fi

View file

@@ -1,14 +0,0 @@
--- a/Makefile
+++ b/Makefile
@@ -559,9 +559,9 @@ endif # $(dot-config)
all: vmlinux
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os
+KBUILD_CFLAGS += -Os $(EXTRA_OPTIMIZATION)
else
-KBUILD_CFLAGS += -O2
+KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
endif
include $(srctree)/arch/$(SRCARCH)/Makefile

View file

@@ -1,11 +0,0 @@
--- a/Makefile
+++ b/Makefile
@@ -374,7 +374,7 @@ KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
-KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)

View file

@@ -1,17 +0,0 @@
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -16,6 +16,7 @@ enum {
false = 0,
true = 1
};
+#endif /* __KERNEL__ */
#undef offsetof
#ifdef __compiler_offsetof
@@ -23,6 +24,5 @@ enum {
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
-#endif /* __KERNEL__ */
#endif

View file

@@ -1,22 +0,0 @@
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -10,7 +10,18 @@
#define USE_BSD
#include <endian.h>
#include <regex.h>
-#include <tools/le_byteshift.h>
+
+static inline void __put_unaligned_le16(uint16_t val, uint8_t *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+}
+
+static inline void put_unaligned_le32(uint32_t val, uint8_t *p)
+{
+ __put_unaligned_le16(val >> 16, p + 2);
+ __put_unaligned_le16(val, p);
+}
static void die(char *fmt, ...);
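
The substituted helpers above write multi-byte values one byte at a time, which sidesteps both alignment faults and host endianness; relocs.c runs on the build host, so it cannot assume the target's byte order. The same technique as a self-contained check:

    #include <stdint.h>
    #include <stdio.h>

    static void put_le16(uint16_t val, uint8_t *p)
    {
            p[0] = val & 0xff;     /* least significant byte first */
            p[1] = val >> 8;
    }

    static void put_le32(uint32_t val, uint8_t *p)
    {
            put_le16(val & 0xffff, p);
            put_le16(val >> 16, p + 2);
    }

    int main(void)
    {
            uint8_t buf[4];

            put_le32(0x12345678, buf);
            /* prints "78 56 34 12" on any host, regardless of endianness */
            printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
            return 0;
    }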

View file

@@ -1,91 +0,0 @@
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -52,6 +52,18 @@
#define LOAD_OFFSET 0
#endif
+#ifndef SYMTAB_KEEP_STR
+#define SYMTAB_KEEP *(SORT(___ksymtab+*))
+#define SYMTAB_KEEP_GPL *(SORT(___ksymtab_gpl+*))
+#define SYMTAB_KEEP_STR *(__ksymtab_strings+*)
+#endif
+
+#ifndef SYMTAB_DISCARD
+#define SYMTAB_DISCARD
+#define SYMTAB_DISCARD_GPL
+#define SYMTAB_DISCARD_STR
+#endif
+
#ifndef SYMBOL_PREFIX
#define VMLINUX_SYMBOL(sym) sym
#else
@@ -275,14 +287,14 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ SYMTAB_KEEP \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ SYMTAB_KEEP_GPL \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
@@ -344,7 +356,7 @@
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
+ SYMTAB_KEEP_STR \
} \
\
/* __*init sections */ \
@@ -676,6 +688,9 @@
EXIT_TEXT \
EXIT_DATA \
EXIT_CALL \
+ SYMTAB_DISCARD \
+ SYMTAB_DISCARD_GPL \
+ SYMTAB_DISCARD_STR \
*(.discard) \
*(.discard.*) \
}
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -45,12 +45,19 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif
+#ifdef MODULE
+#define __EXPORT_SUFFIX(sym)
+#else
+#define __EXPORT_SUFFIX(sym) "+" #sym
+#endif
+
/* For every exported symbol, place a struct in the __ksymtab section */
#define __EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ __attribute__((section("__ksymtab_strings" \
+ __EXPORT_SUFFIX(sym)), aligned(1))) \
= MODULE_SYMBOL_PREFIX #sym; \
static const struct kernel_symbol __ksymtab_##sym \
__used \
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -346,7 +346,7 @@ targets += $(extra-y) $(MAKECMDGOALS) $(
# Linker scripts preprocessor (.lds.S -> .lds)
# ---------------------------------------------------------------------------
quiet_cmd_cpp_lds_S = LDS $@
- cmd_cpp_lds_S = $(CPP) $(cpp_flags) -P -C -U$(ARCH) \
+ cmd_cpp_lds_S = $(CPP) $(EXTRA_LDSFLAGS) $(cpp_flags) -P -C -U$(ARCH) \
-D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
$(obj)/%.lds: $(src)/%.lds.S FORCE
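
The export.h hunk above is the half that makes the linker-script half work: appending "+<symbol>" to the section name puts every exported symbol's name string in its own input section, so a SORT(...+*) pattern like SYMTAB_KEEP can keep or discard entries symbol by symbol. A hedged standalone illustration of the section-naming trick (EXPORT_STRING and my_func are invented names):

    #include <stdio.h>

    /* Place each symbol's name string in a "+<sym>"-suffixed section,
     * mirroring the __EXPORT_SUFFIX() construction above. */
    #define EXPORT_STRING(sym) \
            static const char kstrtab_##sym[] \
            __attribute__((section("__ksymtab_strings+" #sym), \
                           aligned(1), used)) = #sym

    EXPORT_STRING(my_func);   /* lands in "__ksymtab_strings+my_func" */

    int main(void)
    {
            printf("%s\n", kstrtab_my_func);
            return 0;
    }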

View file

@@ -1,54 +0,0 @@
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -296,7 +296,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^)
quiet_cmd_lzma = LZMA $@
cmd_lzma = (cat $(filter-out FORCE,$^) | \
- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
(rm -f $@ ; false)
quiet_cmd_lzo = LZO $@
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -226,7 +226,7 @@ cpio_list=
output="/dev/stdout"
output_file=""
is_cpio_compressed=
-compr="gzip -n -9 -f"
+compr="gzip -n -9 -f -"
arg="$1"
case "$arg" in
@@ -240,9 +240,9 @@ case "$arg" in
output_file="$1"
cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
output=${cpio_list}
- echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f"
- echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
- echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+ echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f -"
+ echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f -"
+ echo "$output_file" | grep -q "\.lzma$" && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so"
echo "$output_file" | grep -q "\.xz$" && \
compr="xz --check=crc32 --lzma2=dict=1MiB"
echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
@@ -303,7 +303,7 @@ if [ ! -z ${output_file} ]; then
if [ "${is_cpio_compressed}" = "compressed" ]; then
cat ${cpio_tfile} > ${output_file}
else
- (cat ${cpio_tfile} | ${compr} - > ${output_file}) \
+ (cat ${cpio_tfile} | ${compr} > ${output_file}) \
|| (rm -f ${output_file} ; false)
fi
[ -z ${cpio_file} ] && rm ${cpio_tfile}
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -40,6 +40,7 @@ static const struct compress_format {
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
+ { {0x6d, 0x00}, "lzma-openwrt", unlzma },
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0, 0}, NULL, NULL }
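
The kernel picks its decompressor by matching the first two bytes of the payload against this table; the added 0x6d entry recognizes the nonstandard LZMA properties byte produced by the lzma invocation patched into Makefile.lib above (lc1/lp2/pb2 encodes to (2*5+2)*9+1 = 0x6d, versus 0x5d for the default lc3/lp0/pb2). The dispatch shape, as a small hedged sketch:

    #include <stdio.h>
    #include <string.h>

    struct compress_format {
            unsigned char magic[2];
            const char *name;
    };

    static const struct compress_format formats[] = {
            { {037, 0236},  "gzip"         },
            { {0x42, 0x5a}, "bzip2"        },
            { {0x5d, 0x00}, "lzma"         },
            { {0x6d, 0x00}, "lzma-openwrt" },   /* the added entry */
            { {0xfd, 0x37}, "xz"           },
            { {0, 0},       NULL           },
    };

    static const char *identify(const unsigned char *buf)
    {
            const struct compress_format *cf;

            for (cf = formats; cf->name; cf++)
                    if (!memcmp(buf, cf->magic, 2))
                            return cf->name;
            return "unknown";
    }

    int main(void)
    {
            const unsigned char img[2] = { 0x6d, 0x00 };

            printf("%s\n", identify(img));   /* lzma-openwrt */
            return 0;
    }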

View file

@@ -1,18 +0,0 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -181,7 +181,6 @@ config NF_CONNTRACK_FTP
config NF_CONNTRACK_H323
tristate "H.323 protocol support"
- depends on (IPV6 || IPV6=n)
depends on NETFILTER_ADVANCED
help
H.323 is a VoIP signalling protocol from ITU-T. As one of the most
@@ -627,7 +626,6 @@ config NETFILTER_XT_TARGET_SECMARK
config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support'
- depends on (IPV6 || IPV6=n)
default m if NETFILTER_ADVANCED=n
---help---
This option adds a `TCPMSS' target, which allows you to alter the

View file

@@ -1,18 +0,0 @@
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -7,13 +7,13 @@ config SND_PCM
select SND_TIMER
config SND_HWDEP
- tristate
+ tristate "Sound hardware support"
config SND_RAWMIDI
tristate
config SND_COMPRESS_OFFLOAD
- tristate
+ tristate "Compression offloading support"
# To be effective this also requires INPUT - users should say:
# select SND_JACK if INPUT=y || INPUT=SND

View file

@@ -1,10 +0,0 @@
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -172,6 +172,7 @@ config CRYPTO_DEV_MV_CESA
depends on PLAT_ORION
select CRYPTO_ALGAPI
select CRYPTO_AES
+ select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
select CRYPTO_HASH
help

View file

@@ -1,29 +0,0 @@
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -29,6 +29,7 @@ config SSB_SPROM
config SSB_BLOCKIO
bool
depends on SSB
+ default y
config SSB_PCIHOST_POSSIBLE
bool
@@ -49,7 +50,7 @@ config SSB_PCIHOST
config SSB_B43_PCI_BRIDGE
bool
depends on SSB_PCIHOST
- default n
+ default y
config SSB_PCMCIAHOST_POSSIBLE
bool
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -17,6 +17,7 @@ config BCMA
config BCMA_BLOCKIO
bool
depends on BCMA
+ default y
config BCMA_HOST_PCI_POSSIBLE
bool

View file

@@ -1,23 +0,0 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -207,16 +207,16 @@ config BCH_CONST_T
# Textsearch support is select'ed if needed
#
config TEXTSEARCH
- boolean
+ boolean "Textsearch support"
config TEXTSEARCH_KMP
- tristate
+ tristate "Textsearch KMP"
config TEXTSEARCH_BM
- tristate
+ tristate "Textsearch BM"
config TEXTSEARCH_FSM
- tristate
+ tristate "Textsearch FSM"
config BTREE
boolean

View file

@@ -1,19 +0,0 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -143,13 +143,13 @@ config LIB80211
you want this built into your kernel.
config LIB80211_CRYPT_WEP
- tristate
+ tristate "LIB80211_CRYPT_WEP"
config LIB80211_CRYPT_CCMP
- tristate
+ tristate "LIB80211_CRYPT_CCMP"
config LIB80211_CRYPT_TKIP
- tristate
+ tristate "LIB80211_CRYPT_TKIP"
config LIB80211_DEBUG
bool "lib80211 debugging messages"

View file

@@ -1,47 +0,0 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -31,7 +31,7 @@ config CRYPTO_FIPS
this is.
config CRYPTO_ALGAPI
- tristate
+ tristate "ALGAPI"
select CRYPTO_ALGAPI2
help
This option provides the API for cryptographic algorithms.
@@ -40,7 +40,7 @@ config CRYPTO_ALGAPI2
tristate
config CRYPTO_AEAD
- tristate
+ tristate "AEAD"
select CRYPTO_AEAD2
select CRYPTO_ALGAPI
@@ -49,7 +49,7 @@ config CRYPTO_AEAD2
select CRYPTO_ALGAPI2
config CRYPTO_BLKCIPHER
- tristate
+ tristate "BLKCIPHER"
select CRYPTO_BLKCIPHER2
select CRYPTO_ALGAPI
@@ -60,7 +60,7 @@ config CRYPTO_BLKCIPHER2
select CRYPTO_WORKQUEUE
config CRYPTO_HASH
- tristate
+ tristate "HASH"
select CRYPTO_HASH2
select CRYPTO_ALGAPI
@@ -69,7 +69,7 @@ config CRYPTO_HASH2
select CRYPTO_ALGAPI2
config CRYPTO_RNG
- tristate
+ tristate "RNG"
select CRYPTO_RNG2
select CRYPTO_ALGAPI

View file

@@ -1,22 +0,0 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,5 @@
config WIRELESS_EXT
- bool
+ bool "Wireless extensions"
config WEXT_CORE
def_bool y
@@ -11,10 +11,10 @@ config WEXT_PROC
depends on WEXT_CORE
config WEXT_SPY
- bool
+ bool "WEXT_SPY"
config WEXT_PRIV
- bool
+ bool "WEXT_PRIV"
config CFG80211
tristate "cfg80211 - wireless configuration API"

View file

@@ -1,11 +0,0 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -2,7 +2,7 @@ menu "Core Netfilter Configuration"
depends on NET && INET && NETFILTER
config NETFILTER_NETLINK
- tristate
+ tristate "Netfilter NFNETLINK interface"
config NETFILTER_NETLINK_ACCT
tristate "Netfilter NFACCT over NFNETLINK interface"

View file

@@ -1,23 +0,0 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -105,16 +105,16 @@ config AUDIT_GENERIC
# compression support is select'ed if needed
#
config ZLIB_INFLATE
- tristate
+ tristate "ZLIB inflate support"
config ZLIB_DEFLATE
- tristate
+ tristate "ZLIB deflate support"
config LZO_COMPRESS
- tristate
+ tristate "LZO compress support"
config LZO_DECOMPRESS
- tristate
+ tristate "LZO decompress support"
source "lib/xz/Kconfig"

View file

@@ -1,219 +0,0 @@
--- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
+++ /dev/null
@@ -1,96 +0,0 @@
-#ifndef __BCM963XX_TAG_H
-#define __BCM963XX_TAG_H
-
-#define TAGVER_LEN 4 /* Length of Tag Version */
-#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
-#define SIG1_LEN 20 /* Company Signature 1 Length */
-#define SIG2_LEN 14 /* Company Signature 2 Length */
-#define BOARDID_LEN 16 /* Length of BoardId */
-#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
-#define CHIPID_LEN 6 /* Chip Id Length */
-#define IMAGE_LEN 10 /* Length of Length Field */
-#define ADDRESS_LEN 12 /* Length of Address field */
-#define DUALFLAG_LEN 2 /* Dual Image flag Length */
-#define INACTIVEFLAG_LEN 2 /* Inactie Flag Length */
-#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
-#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
-#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
-#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
-#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
-
-#define NUM_PIRELLI 2
-#define IMAGETAG_CRC_START 0xFFFFFFFF
-
-#define PIRELLI_BOARDS { \
- "AGPF-S0", \
- "DWV-S0", \
-}
-
-/*
- * The broadcom firmware assumes the rootfs starts the image,
- * therefore uses the rootfs start (flash_image_address)
- * to determine where to flash the image. Since we have the kernel first
- * we have to give it the kernel address, but the crc uses the length
- * associated with this address (root_length), which is added to the kernel
- * length (kernel_length) to determine the length of image to flash and thus
- * needs to be rootfs + deadcode (jffs2 EOF marker)
-*/
-
-struct bcm_tag {
- /* 0-3: Version of the image tag */
- char tag_version[TAGVER_LEN];
- /* 4-23: Company Line 1 */
- char sig_1[SIG1_LEN];
- /* 24-37: Company Line 2 */
- char sig_2[SIG2_LEN];
- /* 38-43: Chip this image is for */
- char chip_id[CHIPID_LEN];
- /* 44-59: Board name */
- char board_id[BOARDID_LEN];
- /* 60-61: Map endianness -- 1 BE 0 LE */
- char big_endian[ENDIANFLAG_LEN];
- /* 62-71: Total length of image */
- char total_length[IMAGE_LEN];
- /* 72-83: Address in memory of CFE */
- char cfe__address[ADDRESS_LEN];
- /* 84-93: Size of CFE */
- char cfe_length[IMAGE_LEN];
- /* 94-105: Address in memory of image start
- * (kernel for OpenWRT, rootfs for stock firmware)
- */
- char flash_image_start[ADDRESS_LEN];
- /* 106-115: Size of rootfs */
- char root_length[IMAGE_LEN];
- /* 116-127: Address in memory of kernel */
- char kernel_address[ADDRESS_LEN];
- /* 128-137: Size of kernel */
- char kernel_length[IMAGE_LEN];
- /* 138-139: Unused at the moment */
- char dual_image[DUALFLAG_LEN];
- /* 140-141: Unused at the moment */
- char inactive_flag[INACTIVEFLAG_LEN];
- /* 142-161: RSA Signature (not used; some vendors may use this) */
- char rsa_signature[RSASIG_LEN];
- /* 162-191: Compilation and related information (not used in OpenWrt) */
- char information1[TAGINFO1_LEN];
- /* 192-195: Version flash layout */
- char flash_layout_ver[FLASHLAYOUTVER_LEN];
- /* 196-199: kernel+rootfs CRC32 */
- __u32 fskernel_crc;
- /* 200-215: Unused except on Alice Gate where is is information */
- char information2[TAGINFO2_LEN];
- /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
- __u32 image_crc;
- /* 220-223: CRC32 of rootfs partition */
- __u32 rootfs_crc;
- /* 224-227: CRC32 of kernel partition */
- __u32 kernel_crc;
- /* 228-235: Unused at present */
- char reserved1[8];
- /* 236-239: CRC32 of header excluding last 20 bytes */
- __u32 header_crc;
- /* 240-255: Unused at present */
- char reserved2[16];
-};
-
-#endif /* __BCM63XX_TAG_H */
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -32,7 +32,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <linux/bcm963xx_tag.h>
#include <asm/mach-bcm63xx/board_bcm963xx.h>
#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -77,6 +77,7 @@ header-y += blk_types.h
header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpqether.h
+header-y += bcm963xx_tag.h
header-y += bsg.h
header-y += can.h
header-y += capability.h
--- /dev/null
+++ b/include/linux/bcm963xx_tag.h
@@ -0,0 +1,96 @@
+#ifndef __BCM963XX_TAG_H
+#define __BCM963XX_TAG_H
+
+#define TAGVER_LEN 4 /* Length of Tag Version */
+#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
+#define SIG1_LEN 20 /* Company Signature 1 Length */
+#define SIG2_LEN 14 /* Company Signature 2 Length */
+#define BOARDID_LEN 16 /* Length of BoardId */
+#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
+#define CHIPID_LEN 6 /* Chip Id Length */
+#define IMAGE_LEN 10 /* Length of Length Field */
+#define ADDRESS_LEN 12 /* Length of Address field */
+#define DUALFLAG_LEN 2 /* Dual Image flag Length */
+#define INACTIVEFLAG_LEN 2 /* Inactie Flag Length */
+#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
+#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
+#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
+#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
+#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
+
+#define NUM_PIRELLI 2
+#define IMAGETAG_CRC_START 0xFFFFFFFF
+
+#define PIRELLI_BOARDS { \
+ "AGPF-S0", \
+ "DWV-S0", \
+}
+
+/*
+ * The broadcom firmware assumes the rootfs starts the image,
+ * therefore uses the rootfs start (flash_image_address)
+ * to determine where to flash the image. Since we have the kernel first
+ * we have to give it the kernel address, but the crc uses the length
+ * associated with this address (root_length), which is added to the kernel
+ * length (kernel_length) to determine the length of image to flash and thus
+ * needs to be rootfs + deadcode (jffs2 EOF marker)
+*/
+
+struct bcm_tag {
+ /* 0-3: Version of the image tag */
+ char tag_version[TAGVER_LEN];
+ /* 4-23: Company Line 1 */
+ char sig_1[SIG1_LEN];
+ /* 24-37: Company Line 2 */
+ char sig_2[SIG2_LEN];
+ /* 38-43: Chip this image is for */
+ char chip_id[CHIPID_LEN];
+ /* 44-59: Board name */
+ char board_id[BOARDID_LEN];
+ /* 60-61: Map endianness -- 1 BE 0 LE */
+ char big_endian[ENDIANFLAG_LEN];
+ /* 62-71: Total length of image */
+ char total_length[IMAGE_LEN];
+ /* 72-83: Address in memory of CFE */
+ char cfe__address[ADDRESS_LEN];
+ /* 84-93: Size of CFE */
+ char cfe_length[IMAGE_LEN];
+ /* 94-105: Address in memory of image start
+ * (kernel for OpenWRT, rootfs for stock firmware)
+ */
+ char flash_image_start[ADDRESS_LEN];
+ /* 106-115: Size of rootfs */
+ char root_length[IMAGE_LEN];
+ /* 116-127: Address in memory of kernel */
+ char kernel_address[ADDRESS_LEN];
+ /* 128-137: Size of kernel */
+ char kernel_length[IMAGE_LEN];
+ /* 138-139: Unused at the moment */
+ char dual_image[DUALFLAG_LEN];
+ /* 140-141: Unused at the moment */
+ char inactive_flag[INACTIVEFLAG_LEN];
+ /* 142-161: RSA Signature (not used; some vendors may use this) */
+ char rsa_signature[RSASIG_LEN];
+ /* 162-191: Compilation and related information (not used in OpenWrt) */
+ char information1[TAGINFO1_LEN];
+ /* 192-195: Version flash layout */
+ char flash_layout_ver[FLASHLAYOUTVER_LEN];
+ /* 196-199: kernel+rootfs CRC32 */
+ __u32 fskernel_crc;
+ /* 200-215: Unused except on Alice Gate where is is information */
+ char information2[TAGINFO2_LEN];
+ /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
+ __u32 image_crc;
+ /* 220-223: CRC32 of rootfs partition */
+ __u32 rootfs_crc;
+ /* 224-227: CRC32 of kernel partition */
+ __u32 kernel_crc;
+ /* 228-235: Unused at present */
+ char reserved1[8];
+ /* 236-239: CRC32 of header excluding last 20 bytes */
+ __u32 header_crc;
+ /* 240-255: Unused at present */
+ char reserved2[16];
+};
+
+#endif /* __BCM63XX_TAG_H */

View file

@@ -1,55 +0,0 @@
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,16 +3,18 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI)
select LZO_COMPRESS
select LZO_DECOMPRESS
- bool
+ tristate "Regmap"
config REGMAP_I2C
- tristate
+ select REGMAP
+ tristate "Regmap I2C"
config REGMAP_SPI
- tristate
+ select REGMAP
+ depends on SPI_MASTER
+ tristate "Regmap SPI"
config REGMAP_IRQ
bool
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,6 +1,8 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
-obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+regmap-core-objs = regmap.o regcache.o regcache-rbtree.o regcache-lzo.o
+ifdef CONFIG_DEBUG_FS
+regmap-core-objs += regmap-debugfs.o
+endif
+obj-$(CONFIG_REGMAP) += regmap-core.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/err.h>
#define CREATE_TRACE_POINTS
@@ -679,3 +680,5 @@ static int __init regmap_initcall(void)
return 0;
}
postcore_initcall(regmap_initcall);
+
+MODULE_LICENSE("GPL");

View file

@@ -1,39 +0,0 @@
From: Mark Miller <mark@mirell.org>
This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
certain Broadcom chipsets running CFE in order to load the kernel.
Signed-off-by: Mark Miller <mark@mirell.org>
Acked-by: Rob Landley <rob@landley.net>
---
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -877,9 +877,6 @@ config ARC
config ARCH_MAY_HAVE_PC_FDC
bool
-config BOOT_RAW
- bool
-
config CEVT_BCM1480
bool
@@ -2330,6 +2327,18 @@ config USE_OF
help
Include support for flattened device tree machine descriptions.
+config BOOT_RAW
+ bool "Enable the kernel to be executed from the load address"
+ default n
+ help
+ Allow the kernel to be executed from the load address for
+ bootloaders which cannot read the ELF format. This places
+ a jump to start_kernel at the load address.
+
+ If unsure, say N.
+
+
+
endmenu
config LOCKDEP_SUPPORT

View file

@@ -1,28 +0,0 @@
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -976,6 +976,10 @@ config SYNC_R4K
config MIPS_MACHINE
def_bool n
+config IMAGE_CMDLINE_HACK
+ bool "OpenWrt specific image command line hack"
+ default n
+
config NO_IOPORT
def_bool n
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -141,6 +141,12 @@ FEXPORT(__kernel_entry)
j kernel_entry
#endif
+#ifdef CONFIG_IMAGE_CMDLINE_HACK
+ .ascii "CMDLINE:"
+EXPORT(__image_cmdline)
+ .fill 0x400
+#endif /* CONFIG_IMAGE_CMDLINE_HACK */
+
__REF
NESTED(kernel_entry, 16, sp) # kernel entry point

View file

@@ -1,18 +0,0 @@
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -85,6 +85,7 @@ register struct thread_info *__current_t
#define STACK_WARN (THREAD_SIZE / 8)
+#if 0
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifdef CONFIG_DEBUG_STACK_USAGE
@@ -96,6 +97,7 @@ register struct thread_info *__current_t
#endif
#define free_thread_info(info) kfree(info)
+#endif
#endif /* !__ASSEMBLY__ */

View file

@@ -1,11 +0,0 @@
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -52,7 +52,7 @@ machine_kexec(struct kimage *image)
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
- kexec_start_address = image->start;
+ kexec_start_address = (unsigned long) phys_to_virt(image->start);
kexec_indirection_page =
(unsigned long) phys_to_virt(image->head & PAGE_MASK);

View file

@@ -1,160 +0,0 @@
MIPS: allow disabling the kernel FPU emulator
This patch allows turning off the in-kernel Algorithmics
FPU emulator support, which allows one to save a couple of
precious blocks on an embedded system.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
--
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -961,6 +961,17 @@ config I8259
config MIPS_BONITO64
bool
+config MIPS_FPU_EMU
+ bool "Enable FPU emulation"
+ default y
+ help
+ This option allows building a kernel with or without the Algorithmics
+ FPU emulator enabled. Turning off this option results in a kernel which
+ does not catch floating operations exceptions. Make sure that your toolchain
+ is configured to enable software floating point emulation in that case.
+
+ If unsure say Y here.
+
config MIPS_MSC
bool
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -2,11 +2,13 @@
# Makefile for the Linux/MIPS kernel FPU emulation.
#
-obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
+obj-y := kernel_linkage.o dsemul.o cp1emu.o
+
+obj-$(CONFIG_MIPS_FPU_EMU) += ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \
dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \
dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
+ dp_sqrt.o sp_sqrt.o
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -58,7 +58,11 @@
#define __mips 4
/* Function which emulates a floating point instruction. */
+#ifdef CONFIG_DEBUG_FS
+DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
+#endif
+#ifdef CONFIG_MIPS_FPU_EMU
static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
mips_instruction);
@@ -69,10 +73,6 @@ static int fpux_emu(struct pt_regs *,
/* Further private data for which no space exists in mips_fpu_struct */
-#ifdef CONFIG_DEBUG_FS
-DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
-#endif
-
/* Control registers */
#define FPCREG_RID 0 /* $0 = revision id */
@@ -1360,7 +1360,6 @@ int fpu_emulator_cop1Handler(struct pt_r
return sig;
}
-
#ifdef CONFIG_DEBUG_FS
static int fpuemu_stat_get(void *data, u64 *val)
@@ -1409,4 +1408,11 @@ static int __init debugfs_fpuemu(void)
return 0;
}
__initcall(debugfs_fpuemu);
-#endif
+#endif /* CONFIG_DEBUGFS */
+#else
+int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ int has_fpu)
+{
+ return 0;
+}
+#endif /* CONFIG_MIPS_FPU_EMU */
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -109,6 +109,7 @@ int mips_dsemul(struct pt_regs *regs, mi
return SIGILL; /* force out of emulation loop */
}
+#ifdef CONFIG_MIPS_FPU_EMU
int do_dsemulret(struct pt_regs *xcp)
{
struct emuframe __user *fr;
@@ -165,3 +166,9 @@ int do_dsemulret(struct pt_regs *xcp)
return 1;
}
+#else
+int do_dsemulret(struct pt_regs *xcp)
+{
+ return 0;
+}
+#endif /* CONFIG_MIPS_FPU_EMU */
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -29,6 +29,7 @@
#define SIGNALLING_NAN 0x7ff800007ff80000LL
+#ifdef CONFIG_MIPS_FPU_EMU
void fpu_emulator_init_fpu(void)
{
static int first = 1;
@@ -112,4 +113,36 @@ int fpu_emulator_restore_context32(struc
return err;
}
-#endif
+#endif /* CONFIG_64BIT */
+#else
+
+void fpu_emulator_init_fpu(void)
+{
+ printk(KERN_INFO "FPU emulator disabled, make sure your toolchain"
+ "was compiled with software floating point support (soft-float)\n");
+ return;
+}
+
+int fpu_emulator_save_context(struct sigcontext __user *sc)
+{
+ return 0;
+}
+
+int fpu_emulator_restore_context(struct sigcontext __user *sc)
+{
+ return 0;
+}
+
+int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
+{
+ return 0;
+}
+
+int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
+{
+ return 0;
+}
+
+#ifdef CONFIG_64BIT
+#endif /* CONFIG_64BIT */
+#endif /* CONFIG_MIPS_FPU_EMU */

View file

@@ -1,371 +0,0 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -90,8 +90,8 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
-KBUILD_AFLAGS_MODULE += -mlong-calls
-KBUILD_CFLAGS_MODULE += -mlong-calls
+KBUILD_AFLAGS_MODULE += -mno-long-calls
+KBUILD_CFLAGS_MODULE += -mno-long-calls
cflags-y += -ffreestanding
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -9,6 +9,11 @@ struct mod_arch_specific {
struct list_head dbe_list;
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
+
+ void *phys_plt_tbl;
+ void *virt_plt_tbl;
+ unsigned int phys_plt_offset;
+ unsigned int virt_plt_offset;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -44,14 +44,219 @@ static struct mips_hi16 *mips_hi16_list;
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
-#ifdef MODULE_START
+/*
+ * Get the potential max trampolines size required of the init and
+ * non-init sections. Only used if we cannot find enough contiguous
+ * physically mapped memory to put the module into.
+ */
+static unsigned int
+get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ const char *secstrings, unsigned int symindex, bool is_init)
+{
+ unsigned long ret = 0;
+ unsigned int i, j;
+ Elf_Sym *syms;
+
+ /* Everything marked ALLOC (this includes the exported symbols) */
+ for (i = 1; i < hdr->e_shnum; ++i) {
+ unsigned int info = sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_REL
+ && sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ /* If it's called *.init*, and we're not init, we're
+ not interested */
+ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
+ != is_init)
+ continue;
+
+ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
+ if (sechdrs[i].sh_type == SHT_REL) {
+ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rel[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ } else {
+ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rela[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ }
+ }
+
+ return ret;
+}
+
+#ifndef MODULE_START
+static void *alloc_phys(unsigned long size)
+{
+ unsigned order;
+ struct page *page;
+ struct page *p;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_THISNODE, order);
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+
+ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
+ __free_page(p);
+
+ return page_address(page);
+}
+#endif
+
+static void free_phys(void *ptr, unsigned long size)
+{
+ struct page *page;
+ struct page *end;
+
+ page = virt_to_page(ptr);
+ end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
+
+ for (; page < end; ++page)
+ __free_page(page);
+}
+
+
void *module_alloc(unsigned long size)
{
+#ifdef MODULE_START
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, -1,
__builtin_return_address(0));
+#else
+ void *ptr;
+
+ if (size == 0)
+ return NULL;
+
+ ptr = alloc_phys(size);
+
+ /* If we failed to allocate physically contiguous memory,
+ * fall back to regular vmalloc. The module loader code will
+ * create jump tables to handle long jumps */
+ if (!ptr)
+ return vmalloc(size);
+
+ return ptr;
+#endif
}
+
+static inline bool is_phys_addr(void *ptr)
+{
+#ifdef CONFIG_64BIT
+ return (KSEGX((unsigned long)ptr) == CKSEG0);
+#else
+ return (KSEGX(ptr) == KSEG0);
#endif
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ if (is_phys_addr(module_region)) {
+ if (mod->module_init == module_region)
+ free_phys(module_region, mod->init_size);
+ else if (mod->module_core == module_region)
+ free_phys(module_region, mod->core_size);
+ else
+ BUG();
+ } else {
+ vfree(module_region);
+ }
+}
+
+static void *__module_alloc(int size, bool phys)
+{
+ void *ptr;
+
+ if (phys)
+ ptr = kmalloc(size, GFP_KERNEL);
+ else
+ ptr = vmalloc(size);
+ return ptr;
+}
+
+static void __module_free(void *ptr)
+{
+ if (is_phys_addr(ptr))
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int symindex = 0;
+ unsigned int core_size, init_size;
+ int i;
+
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (sechdrs[i].sh_type == SHT_SYMTAB)
+ symindex = i;
+
+ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
+ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
+
+ mod->arch.phys_plt_offset = 0;
+ mod->arch.virt_plt_offset = 0;
+ mod->arch.phys_plt_tbl = NULL;
+ mod->arch.virt_plt_tbl = NULL;
+
+ if ((core_size + init_size) == 0)
+ return 0;
+
+ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
+ if (!mod->arch.phys_plt_tbl)
+ return -ENOMEM;
+
+ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
+ if (!mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
@@ -72,28 +277,36 @@ static int apply_r_mips_32_rela(struct m
return 0;
}
-static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
+static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
+ void *start, Elf_Addr v)
{
- if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
- me->name);
- return -ENOEXEC;
- }
+ unsigned *tramp = start + *plt_offset;
+ *plt_offset += 4 * sizeof(int);
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
- return -ENOEXEC;
- }
+ /* adjust carry for addiu */
+ if (v & 0x00008000)
+ v += 0x10000;
- *location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
+ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
+ tramp[2] = 0x03200008; /* jr t9 */
+ tramp[3] = 0x00000000; /* nop */
- return 0;
+ return (Elf_Addr) tramp;
}
-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
+static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
+{
+ if (is_phys_addr(location))
+ return add_plt_entry_to(&me->arch.phys_plt_offset,
+ me->arch.phys_plt_tbl, v);
+ else
+ return add_plt_entry_to(&me->arch.virt_plt_offset,
+ me->arch.virt_plt_tbl, v);
+
+}
+
+static int set_r_mips_26(struct module *me, u32 *location, u32 ofs, Elf_Addr v)
{
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n",
@@ -102,17 +315,31 @@ static int apply_r_mips_26_rela(struct m
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
+ v = add_plt_entry(me, location, v + (ofs << 2));
+ if (!v) {
+ printk(KERN_ERR
"module %s: relocation overflow\n",
me->name);
- return -ENOEXEC;
+ return -ENOEXEC;
+ }
+ ofs = 0;
}
- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
+ *location = (*location & ~0x03ffffff) | ((ofs + (v >> 2)) & 0x03ffffff);
return 0;
}
+static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ return set_r_mips_26(me, location, *location & 0x03ffffff, v);
+}
+
+static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ return set_r_mips_26(me, location, 0, v);
+}
+
static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
{
struct mips_hi16 *n;
@@ -380,11 +607,32 @@ int module_finalize(const Elf_Ehdr *hdr,
list_add(&me->arch.dbe_list, &dbe_list);
spin_unlock_irq(&dbe_lock);
}
+
+ /* Get rid of the fixup trampoline if we're running the module
+ * from physically mapped address space */
+ if (me->arch.phys_plt_offset == 0) {
+ __module_free(me->arch.phys_plt_tbl);
+ me->arch.phys_plt_tbl = NULL;
+ }
+ if (me->arch.virt_plt_offset == 0) {
+ __module_free(me->arch.virt_plt_tbl);
+ me->arch.virt_plt_tbl = NULL;
+ }
+
return 0;
}
void module_arch_cleanup(struct module *mod)
{
+ if (mod->arch.phys_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ }
+ if (mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.virt_plt_tbl);
+ mod->arch.virt_plt_tbl = NULL;
+ }
+
spin_lock_irq(&dbe_lock);
list_del(&mod->arch.dbe_list);
spin_unlock_irq(&dbe_lock);
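
The trampoline emitted by add_plt_entry_to() above materializes a full 32-bit target in t9 (lui/addiu/jr/nop); the non-obvious line is the carry adjustment, needed because addiu sign-extends its 16-bit immediate. A standalone check of that hi/lo split (split_hi_lo is an illustrative name):

    #include <assert.h>
    #include <stdint.h>

    /* Split addr into the immediates for "lui t9, hi" / "addiu t9, t9, lo". */
    static void split_hi_lo(uint32_t addr, uint16_t *hi, uint16_t *lo)
    {
            uint32_t v = addr;

            if (v & 0x00008000)     /* lo will sign-extend negative: carry */
                    v += 0x10000;
            *hi = v >> 16;
            *lo = addr & 0xffff;
    }

    int main(void)
    {
            uint16_t hi, lo;

            split_hi_lo(0x8001c000, &hi, &lo);
            /* lui puts hi in the top half; addiu adds the sign-extended lo */
            assert((((uint32_t)hi << 16) + (uint32_t)(int32_t)(int16_t)lo)
                   == 0x8001c000);
            return 0;
    }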

View file

@@ -1,83 +0,0 @@
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -133,11 +133,44 @@ strncmp(__const__ char *__cs, __const__
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+#define memset(__s, __c, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memset((__s), (__c), __len); \
+ else \
+ __ret = __builtin_memset((__s), (__c), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+#define memcpy(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memcpy((dst), (src), __len); \
+ else \
+ __ret = __builtin_memcpy((dst), (src), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+#define memmove(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memmove((dst), (src), __len); \
+ else \
+ __ret = __builtin_memmove((dst), (src), __len); \
+ __ret; \
+})
+
+#define __HAVE_ARCH_MEMCMP
+#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
#endif /* _ASM_STRING_H */
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,7 +3,7 @@
#
lib-y += csum_partial.o delay.o memcpy.o memcpy-inatomic.o memset.o \
- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+ strlen_user.o strncpy_user.o strnlen_user.o uncached.o memcmp.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
--- /dev/null
+++ b/arch/mips/lib/memcmp.c
@@ -0,0 +1,22 @@
+/*
+ * copied from linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#undef memcmp
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
+EXPORT_SYMBOL(memcmp);
+
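
The memset/memcpy/memmove wrappers in the string.h hunk all share one dispatch: __builtin_constant_p(len) lets the compiler route small or variable-size operations to its inline builtin while constant sizes of 64 bytes and up still reach the hand-tuned assembly. The same shape as a hedged standalone sketch (big_memset stands in for the out-of-line version; the statement expression is the GCC extension the kernel relies on):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the out-of-line, hand-optimized memset. */
    static void *big_memset(void *s, int c, size_t n)
    {
            return memset(s, c, n);
    }

    #define fast_memset(s, c, len)                             \
    ({                                                         \
            size_t __len = (len);                              \
            void *__ret;                                       \
            if (__builtin_constant_p(len) && __len >= 64)      \
                    __ret = big_memset((s), (c), __len);       \
            else                                               \
                    __ret = __builtin_memset((s), (c), __len); \
            __ret;                                             \
    })

    int main(void)
    {
            char small[16], large[128];

            fast_memset(small, 1, sizeof(small));   /* builtin, inlinable */
            fast_memset(large, 2, sizeof(large));   /* constant >= 64     */
            printf("%d %d\n", small[0], large[0]);
            return 0;
    }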

View file

@@ -1,35 +0,0 @@
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -298,6 +298,11 @@ static void reset_counters(void *arg)
}
}
+static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
+{
+ return mipsxx_perfcount_handler();
+}
+
static int __init mipsxx_init(void)
{
int counters;
@@ -374,6 +379,10 @@ static int __init mipsxx_init(void)
save_perf_irq = perf_irq;
perf_irq = mipsxx_perfcount_handler;
+ if (cp0_perfcount_irq >= 0)
+ return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
+ IRQF_SHARED, "Perfcounter", save_perf_irq);
+
return 0;
}
@@ -381,6 +390,9 @@ static void mipsxx_exit(void)
{
int counters = op_model_mipsxx_ops.num_counters;
+ if (cp0_perfcount_irq >= 0)
+ free_irq(cp0_perfcount_irq, save_perf_irq);
+
counters = counters_per_cpu_to_total(counters);
on_each_cpu(reset_counters, (void *)(long)counters, 1);

View file

@@ -1,17 +0,0 @@
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -816,10 +816,13 @@ static inline void cpu_probe_mips(struct
__cpu_name[cpu] = "MIPS 20Kc";
break;
case PRID_IMP_24K:
- case PRID_IMP_24KE:
c->cputype = CPU_24K;
__cpu_name[cpu] = "MIPS 24Kc";
break;
+ case PRID_IMP_24KE:
+ c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24KEc";
+ break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
__cpu_name[cpu] = "MIPS 25Kc";

View file

@@ -1,32 +0,0 @@
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -39,6 +39,7 @@ void (*__flush_kernel_vmap_range)(unsign
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+EXPORT_SYMBOL(__flush_cache_all);
/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,6 +19,9 @@
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
+#ifdef CONFIG_MIPS
+#include <asm/cacheflush.h>
+#endif
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -655,6 +658,9 @@ static int fuse_copy_fill(struct fuse_co
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
unsigned ncpy = min(*size, cs->len);
+#ifdef CONFIG_MIPS
+ __flush_cache_all();
+#endif
if (val) {
if (cs->write)
memcpy(cs->buf, *val, ncpy);

View file

@@ -1,13 +0,0 @@
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -81,6 +81,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
return -ENOEXEC;
}
+ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
+ ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+
loc = dstsec->sh_addr + rel->r_offset;
switch (ELF32_R_TYPE(rel->r_info)) {

View file

@@ -1,31 +0,0 @@
Upstream doesn't optimize the kernel and bootwrappers for ppc44x because
they still want to support gcc 3.3 -- well, we don't.
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -130,7 +130,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
KBUILD_CFLAGS += -mno-sched-epilog
endif
-cpu-as-$(CONFIG_4xx) += -Wa,-m405
+cpu-as-$(CONFIG_40x) += -Wa,-m405
+cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
cpu-as-$(CONFIG_E500) += -Wa,-me500
cpu-as-$(CONFIG_E200) += -Wa,-me200
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -38,10 +38,10 @@ BOOTCFLAGS += -I$(obj) -I$(srctree)/$(ob
DTC_FLAGS ?= -p 1024
$(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
+$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
+$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
+$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405

View file

@@ -1,10 +0,0 @@
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -94,7 +94,6 @@ else
endif
endif
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
ifeq ($(CONFIG_TUNE_CELL),y)
KBUILD_CFLAGS += $(call cc-option,-mtune=cell)

View file

@@ -1,10 +0,0 @@
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -19,6 +19,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>

View file

@@ -1,616 +0,0 @@
From eee16330c9de9adf7880cce9f1d32e13f89706bb Mon Sep 17 00:00:00 2001
From: Wu Zhangjin <wuzhangjin@gmail.com>
Date: Tue, 11 Jan 2011 13:16:47 +0000
Subject: MIPS: Add crash and kdump support
From: http://patchwork.linux-mips.org/patch/1025/
Hello folks,
Please find here MIPS crash and kdump patches.
This is patch set of 3 patches:
1. generic MIPS changes (kernel);
2. MIPS Cavium Octeon board kexec/kdump code (kernel);
3. Kexec user space MIPS changes.
Patches were tested on the latest linux-mips@ git kernel and the latest
kexec-tools git on Cavium Octeon 50xx board.
I also made the same code working on RMI XLR/XLS boards for both
mips32 and mips64 kernels.
Best regards,
Maxim Uvarov.
------
[ Zhangjin: Several trivial building failure has been fixed.
Note: the 2nd patch can not be cleanly applied, but may be a good
reference for the other board development:
+ MIPS Cavium Octeon board kexec,kdump support
http://patchwork.linux-mips.org/patch/1026/
And the 3rd patch has already been merged into the mainline kexec-tools:
+ some kexec MIPS improvements
http://patchwork.linux-mips.org/patch/1027/
kexec-tools is available here:
+ http://horms.net/projects/kexec/
git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
]
Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
---
(limited to 'arch/mips/kernel')
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -97,7 +97,8 @@ obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
--- /dev/null
+++ b/arch/mips/kernel/crash.c
@@ -0,0 +1,75 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_CRASH_DUMP
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+#endif
+
+/* This keeps a track of which one is crashing cpu. */
+int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+void crash_shutdown_secondary(void *ignore)
+{
+ struct pt_regs *regs;
+ int cpu = smp_processor_id();
+
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ local_irq_disable();
+ if (!cpu_isset(cpu, cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpu_set(cpu, cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+ relocated_kexec_smp_wait(NULL);
+ /* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+ unsigned int msecs;
+
+ unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+
+ dump_send_ipi(crash_shutdown_secondary);
+ smp_wmb();
+
+ /*
+ * The crash CPU sends an IPI and wait for other CPUs to
+ * respond. Delay of at least 10 seconds.
+ */
+ printk(KERN_EMERG "Sending IPI to other cpus...\n");
+ msecs = 10000;
+ while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ cpu_relax();
+ mdelay(1);
+ }
+}
+
+#else
+static void crash_kexec_prepare_cpus(void) {}
+#endif
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus();
+ cpu_set(crashing_cpu, cpus_in_crash);
+}
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,86 @@
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_PROC_VMCORE
+static int __init parse_elfcorehdr(char *p)
+{
+ if (p)
+ elfcorehdr_addr = memparse(p, &p);
+ return 1;
+}
+__setup("elfcorehdr=", parse_elfcorehdr);
+#endif
+
+static int __init parse_savemaxmem(char *p)
+{
+ if (p)
+ saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
+
+ return 1;
+}
+__setup("savemaxmem=", parse_savemaxmem);
+
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+
+ if (!userbuf) {
+ memcpy(buf, (vaddr + offset), csize);
+ kunmap_atomic(vaddr, KM_PTE0);
+ } else {
+ if (!kdump_buf_page) {
+ printk(KERN_WARNING "Kdump: Kdump buffer page not"
+ " allocated\n");
+ return -EFAULT;
+ }
+ copy_page(kdump_buf_page, vaddr);
+ kunmap_atomic(vaddr, KM_PTE0);
+ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+ return -EFAULT;
+ }
+
+ return csize;
+}
+
+static int __init kdump_buf_page_init(void)
+{
+ int ret = 0;
+
+ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kdump_buf_page) {
+ printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
+ " page\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+arch_initcall(kdump_buf_page_init);
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+#ifdef CONFIG_SMP
+void (*relocated_kexec_smp_wait) (void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
int
machine_kexec_prepare(struct kimage *kimage)
{
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
return 0;
}
@@ -33,11 +43,17 @@ machine_kexec_cleanup(struct kimage *kim
void
machine_shutdown(void)
{
+ if (_machine_kexec_shutdown)
+ _machine_kexec_shutdown();
}
void
machine_crash_shutdown(struct pt_regs *regs)
{
+ if (_machine_crash_shutdown)
+ _machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
}
typedef void (*noretfun_t)(void) __attribute__((noreturn));
@@ -52,7 +68,9 @@ machine_kexec(struct kimage *image)
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
- kexec_start_address = (unsigned long) phys_to_virt(image->start);
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
+
kexec_indirection_page =
(unsigned long) phys_to_virt(image->head & PAGE_MASK);
@@ -63,7 +81,7 @@ machine_kexec(struct kimage *image)
* The generic kexec code builds a page list with physical
* addresses. they are directly accessible through KSEG0 (or
* CKSEG0 or XPHYS if on 64bit system), hence the
- * pys_to_virt() call.
+ * phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
@@ -81,5 +99,13 @@ machine_kexec(struct kimage *image)
printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
__flush_cache_all();
+#ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+ (void *)(kexec_smp_wait - relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
((noretfun_t) reboot_code_buffer)();
}
+
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -15,6 +15,11 @@
#include <asm/addrspace.h>
LEAF(relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+ PTR_L a3, arg3
+
PTR_L s0, kexec_indirection_page
PTR_L s1, kexec_start_address
@@ -26,7 +31,6 @@ process_entry:
and s3, s2, 0x1
beq s3, zero, 1f
and s4, s2, ~0x1 /* store destination addr in s4 */
- move a0, s4
b process_entry
1:
@@ -60,23 +64,100 @@ copy_word:
b process_entry
done:
+#ifdef CONFIG_SMP
+ /* kexec_flag reset is signal to other CPUs what kernel
+ was moved to it's location. Note - we need relocated address
+ of kexec_flag. */
+
+ bal 1f
+ 1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+ LONG_S zero,(t0)
+#endif
+
+ sync
/* jump to kexec_start_address */
j s1
END(relocate_new_kernel)
-kexec_start_address:
- EXPORT(kexec_start_address)
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until code is relocated and
+ * then start at entry (?) point.
+ */
+LEAF(kexec_smp_wait)
+ PTR_L a0, s_arg0
+ PTR_L a1, s_arg1
+ PTR_L a2, s_arg2
+ PTR_L a3, s_arg3
+ PTR_L s1, kexec_start_address
+
+ /* Non-relocated address works for args and kexec_start_address ( old
+ * kernel is not overwritten). But we need relocated address of
+ * kexec_flag.
+ */
+
+ bal 1f
+1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+
+1: LONG_L s0, (t0)
+ bne s0, zero,1b
+
+ sync
+ j s1
+ END(kexec_smp_wait)
+#endif
+
+#ifdef __mips64
+ /* all PTR's must be aligned to 8 byte in 64-bit mode */
+ .align 3
+#endif
+
+/* All parameters to new kernel are passed in registers a0-a3.
+ * kexec_args[0..3] are uses to prepare register values.
+ */
+
+EXPORT(kexec_args)
+arg0: PTR 0x0
+arg1: PTR 0x0
+arg2: PTR 0x0
+arg3: PTR 0x0
+ .size kexec_args,PTRSIZE*4
+
+#ifdef CONFIG_SMP
+/*
+ * Secondary CPUs may have different kernel parameters in
+ * their registers a0-a3. secondary_kexec_args[0..3] are used
+ * to prepare register values.
+ */
+EXPORT(secondary_kexec_args)
+s_arg0: PTR 0x0
+s_arg1: PTR 0x0
+s_arg2: PTR 0x0
+s_arg3: PTR 0x0
+ .size secondary_kexec_args,PTRSIZE*4
+kexec_flag:
+ LONG 0x1
+
+#endif
+
+EXPORT(kexec_start_address)
PTR 0x0
.size kexec_start_address, PTRSIZE
-kexec_indirection_page:
- EXPORT(kexec_indirection_page)
+EXPORT(kexec_indirection_page)
PTR 0
.size kexec_indirection_page, PTRSIZE
relocate_new_kernel_end:
-relocate_new_kernel_size:
- EXPORT(relocate_new_kernel_size)
+EXPORT(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
.size relocate_new_kernel_size, PTRSIZE
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
+#include <linux/kexec.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -523,12 +524,62 @@ static void __init arch_mem_init(char **
}
bootmem_init();
+#ifdef CONFIG_KEXEC
+ if (crashk_res.start != crashk_res.end)
+ reserve_bootmem(crashk_res.start,
+ crashk_res.end - crashk_res.start + 1,
+ BOOTMEM_DEFAULT);
+#endif
device_tree_init();
sparse_init();
plat_swiotlb_setup();
paging_init();
}
+#ifdef CONFIG_KEXEC
+static inline unsigned long long get_total_mem(void)
+{
+ unsigned long long total;
+ total = max_pfn - min_low_pfn;
+ return total << PAGE_SHIFT;
+}
+
+static void __init mips_parse_crashkernel(void)
+{
+ unsigned long long total_mem;
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ total_mem = get_total_mem();
+ ret = parse_crashkernel(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+static void __init request_crashkernel(struct resource *res)
+{
+ int ret;
+
+ ret = request_resource(res, &crashk_res);
+ if (!ret)
+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+ "for crashkernel\n",
+ (unsigned long)((crashk_res.end -
+ crashk_res.start + 1) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+}
+#else
+static void __init mips_parse_crashkernel(void)
+{
+}
+static void __init request_crashkernel(struct resource *res)
+{
+}
+#endif
+
static void __init resource_init(void)
{
int i;
@@ -544,6 +595,8 @@ static void __init resource_init(void)
/*
* Request address space for all standard RAM.
*/
+ mips_parse_crashkernel();
+
for (i = 0; i < boot_mem_map.nr_map; i++) {
struct resource *res;
unsigned long start, end;
@@ -580,6 +633,7 @@ static void __init resource_init(void)
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
+ request_crashkernel(res);
}
}
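
For reference, the region reserved above comes from the standard crashkernel=
boot parameter understood by parse_crashkernel(). For example (illustrative
values):

	crashkernel=16M@32M

on the kernel command line reserves 16 MB at physical address 32 MB for the
capture kernel, which request_crashkernel() then claims against the RAM
resources.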
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -433,3 +433,21 @@ void flush_tlb_one(unsigned long vaddr)
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
+
+#if defined(CONFIG_KEXEC)
+void (*dump_ipi_function_ptr)(void *) = NULL;
+void dump_send_ipi(void (*dump_ipi_callback)(void *))
+{
+ int i;
+ int cpu = smp_processor_id();
+
+ dump_ipi_function_ptr = dump_ipi_callback;
+ smp_mb();
+ for_each_online_cpu(i)
+ if (i != cpu)
+ core_send_ipi(i, SMP_DUMP);
+
+}
+EXPORT_SYMBOL(dump_send_ipi);
+#endif
+
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -9,22 +9,45 @@
#ifndef _MIPS_KEXEC
# define _MIPS_KEXEC
+#include <asm/stacktrace.h>
+
+extern unsigned long long elfcorehdr_addr;
+
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
-
-#define KEXEC_CONTROL_PAGE_SIZE 4096
+/* Reserve 3*4096 bytes for board-specific info */
+#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
+#define MAX_NOTE_BYTES 1024
static inline void crash_setup_regs(struct pt_regs *newregs,
- struct pt_regs *oldregs)
+ struct pt_regs *oldregs)
{
- /* Dummy implementation for now */
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else
+ prepare_frametrace(newregs);
}
+#ifdef CONFIG_KEXEC
+struct kimage;
+extern unsigned long kexec_args[4];
+extern int (*_machine_kexec_prepare)(struct kimage *);
+extern void (*_machine_kexec_shutdown)(void);
+extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+#ifdef CONFIG_SMP
+extern const unsigned char kexec_smp_wait[];
+extern unsigned long secondary_kexec_args[4];
+extern void (*relocated_kexec_smp_wait) (void *);
+extern atomic_t kexec_ready_to_reboot;
+#endif
+#endif
+
#endif /* !_MIPS_KEXEC */
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -40,6 +40,8 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
+/* Used by kexec crashdump to save all cpu's state */
+#define SMP_DUMP 0x8
extern volatile cpumask_t cpu_callin_map;
@@ -91,4 +93,9 @@ static inline void arch_send_call_functi
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
+extern void core_send_ipi(int cpu, unsigned int action);
+#if defined(CONFIG_KEXEC)
+extern void (*dump_ipi_function_ptr)(void *);
+void dump_send_ipi(void (*dump_ipi_callback)(void *));
+#endif
#endif /* __ASM_SMP_H */


@@ -1,157 +0,0 @@
From 03cd81fbca6b91317ec1a7b3b3c09fb8d08f83a6 Mon Sep 17 00:00:00 2001
From: Wu Zhangjin <wuzhangjin@gmail.com>
Date: Tue, 11 Jan 2011 18:42:08 +0000
Subject: MIPS: Kexec: Enhance the support
Changes:
o Print more information in machine_kexec() for debugging
E.g. with this information, kexec_start_address was found to be wrong
with a 64-bit kernel / o32 kexec-tools, which must be fixed later.
o Link relocate_kernel.S into a dedicated section for future extension
This allows more functions, even ones written in C, to be added to the
kexec relocation part. To add code into that section, you just need
to mark your function or data with __kexec or
__attribute__((__section__(".__kexec.relocate")))
TODO:
1. Make a 64-bit kernel work with o32|n32|64 kexec-tools
Fix the user-space kexec-tools; it seems the tool only works for 32-bit
machines, so we need to add 64-bit support to it. The address of the
entry point (kexec_start_address) is wrong and makes "kexec -e" fail;
the real entry point must be read from the new kernel image by the
user-space kexec-tools, otherwise it will not work. The 64-bit setup
tested above was a 64-bit kernel with o32 user-space kexec-tools. The
root cause may be the differing definitions of virt_to_phys() and
phys_to_virt() between kexec-tools and kernel space on a 64-bit
system / o32 kernel.
Ref: http://www.linux-mips.org/archives/linux-mips/2009-08/msg00149.html
2. Pass the arguments from kexec-tools to the new kernel image
Please refer to: "MIPS: Loongson: Kexec: Pass parameters to new kernel"
Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
---
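As a purely illustrative sketch (not part of the patch; the names are
hypothetical), tagging code or data for the relocated section described
above works like this:

	#define __kexec __attribute__((__section__(".__kexec.relocate")))

	static char relocate_scratch[64] __kexec;	/* hypothetical buffer */

	static void __kexec relocate_helper(void)
	{
		/* executes from the copy placed in the reboot code buffer */
	}
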
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -36,6 +36,16 @@ static inline void crash_setup_regs(stru
}
#ifdef CONFIG_KEXEC
+
+#define __kexec __attribute__((__section__(".__kexec.relocate")))
+
+/* The linker tells us where the relocate_new_kernel part is. */
+extern const unsigned char __start___kexec_relocate;
+extern const unsigned char __end___kexec_relocate;
+
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
struct kimage;
extern unsigned long kexec_args[4];
extern int (*_machine_kexec_prepare)(struct kimage *);
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -14,10 +14,6 @@
#include <asm/page.h>
extern const unsigned char relocate_new_kernel[];
-extern const size_t relocate_new_kernel_size;
-
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
@@ -61,21 +57,34 @@ typedef void (*noretfun_t)(void) __attri
void
machine_kexec(struct kimage *image)
{
+ unsigned long kexec_relocate_size;
unsigned long reboot_code_buffer;
unsigned long entry;
unsigned long *ptr;
+ kexec_relocate_size = (unsigned long)(&__end___kexec_relocate) -
+ (unsigned long)(&__start___kexec_relocate);
+ pr_info("kexec_relocate_size = %lu\n", kexec_relocate_size);
+
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
+ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
kexec_start_address =
(unsigned long) phys_to_virt(image->start);
+ pr_info("kexec_start_address(entry point of new kernel) = %p\n",
+ (void *)kexec_start_address);
kexec_indirection_page =
(unsigned long) phys_to_virt(image->head & PAGE_MASK);
+ pr_info("kexec_indirection_page = %p\n",
+ (void *)kexec_indirection_page);
- memcpy((void*)reboot_code_buffer, relocate_new_kernel,
- relocate_new_kernel_size);
+ memcpy((void *)reboot_code_buffer, &__start___kexec_relocate,
+ kexec_relocate_size);
+
+ pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
+ &__start___kexec_relocate, (void *)reboot_code_buffer);
/*
* The generic kexec code builds a page list with physical
@@ -96,8 +105,8 @@ machine_kexec(struct kimage *image)
*/
local_irq_disable();
- printk("Will call new kernel at %08lx\n", image->start);
- printk("Bye ...\n");
+ pr_info("Will call new kernel at %p\n", (void *)kexec_start_address);
+ pr_info("Bye ...\n");
__flush_cache_all();
#ifdef CONFIG_SMP
/* All secondary cpus now may jump to kexec_wait cycle */
@@ -108,4 +117,3 @@ machine_kexec(struct kimage *image)
#endif
((noretfun_t) reboot_code_buffer)();
}
-
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -14,6 +14,8 @@
#include <asm/stackframe.h>
#include <asm/addrspace.h>
+ .section .kexec.relocate, "ax"
+
LEAF(relocate_new_kernel)
PTR_L a0, arg0
PTR_L a1, arg1
@@ -155,9 +157,3 @@ EXPORT(kexec_start_address)
EXPORT(kexec_indirection_page)
PTR 0
.size kexec_indirection_page, PTRSIZE
-
-relocate_new_kernel_end:
-
-EXPORT(relocate_new_kernel_size)
- PTR relocate_new_kernel_end - relocate_new_kernel
- .size relocate_new_kernel_size, PTRSIZE
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -50,6 +50,10 @@ SECTIONS
*(.text.*)
*(.fixup)
*(.gnu.warning)
+ __start___kexec_relocate = .;
+ KEEP(*(.kexec.relocate))
+ KEEP(*(.__kexec.relocate))
+ __end___kexec_relocate = .;
} :text = 0
_etext = .; /* End of text section */


@@ -1,52 +0,0 @@
From 49d07a29653b1f2c6ae273b3d8fe93d981f43004 Mon Sep 17 00:00:00 2001
From: Wu Zhangjin <wuzhangjin@gmail.com>
Date: Wed, 12 Jan 2011 20:59:32 +0000
Subject: MIPS: Kexec: Init the arguments for the new kernel image
When kexec-tools passes command lines to the new kernel image,
initialize the arguments to the ones used for the 1st kernel image
(fw_arg0..3, i.e. the a0-a3 values saved from the original bootloader).
This fixes the kexec boot failure on the YeeLoong.
Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
---
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/delay.h>
+#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
@@ -23,9 +24,30 @@ void (*relocated_kexec_smp_wait) (void *
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
+static void machine_kexec_init_args(void)
+{
+ kexec_args[0] = fw_arg0;
+ kexec_args[1] = fw_arg1;
+ kexec_args[2] = fw_arg2;
+ kexec_args[3] = fw_arg3;
+
+ pr_info("kexec_args[0] (argc): %lu\n", kexec_args[0]);
+ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
+ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
+ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
+}
+
int
machine_kexec_prepare(struct kimage *kimage)
{
+ /*
+ * First init the arguments to the original ones from the bootloader,
+ * to avoid a boot failure when kexec-tools passes none.
+ *
+ * This can be overridden by _machine_kexec_prepare().
+ */
+ machine_kexec_init_args();
+
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);
return 0;


@@ -1,88 +0,0 @@
From 240c76841b26f1b09aaced33414ee1d08b6454cf Mon Sep 17 00:00:00 2001
From: Wu Zhangjin <wuzhangjin@gmail.com>
Date: Sat, 15 Jan 2011 12:46:03 +0000
Subject: MIPS: Get kernel parameters from kexec-tools
Before, we simply used the command lines from the original bootloader,
which is not convenient. Now, we accept the kernel parameters from the
--command-line or --append option of kexec-tools. If no --command-line
or --append option is given, we fall back to using the ones from the
original bootloader.
Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
---
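As a usage illustration (paths and arguments are hypothetical), the
kexec-tools invocation this patch targets looks like:

	kexec -l /boot/vmlinux --command-line="console=ttyS0,115200 root=/dev/mtdblock2"
	kexec -e

With neither --command-line nor --append given, the arguments taken over
from the original bootloader remain in effect.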
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
+#include <asm/uaccess.h>
extern const unsigned char relocate_new_kernel[];
@@ -37,6 +38,56 @@ static void machine_kexec_init_args(void
pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
}
+#define ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
+
+int machine_kexec_pass_args(struct kimage *image)
+{
+ int i, argc = 0;
+ char *bootloader = "kexec";
+ int *kexec_argv = (int *)kexec_args[1];
+
+ for (i = 0; i < image->nr_segments; i++) {
+ if (!strncmp(bootloader, (char *)image->segment[i].buf,
+ strlen(bootloader))) {
+ /*
+ * convert command line string to array
+ * of parameters (as bootloader does).
+ */
+ /*
+ * Note: we do treat the 1st string "kexec" as an
+ * argument ;-) so, argc here is 1.
+ */
+ char *str = (char *)image->segment[i].buf;
+ char *ptr = strchr(str, ' ');
+ char *kbuf = (char *)kexec_argv[0];
+ /* Whenever --command-line or --append used, "kexec" is copied */
+ argc = 1;
+ /* Parse the offset */
+ while (ptr && (ARGV_MAX_ARGS > argc)) {
+ *ptr = '\0';
+ if (ptr[1] != ' ' && ptr[1] != '\0') {
+ int offt = (int)(ptr - str + 1);
+ kexec_argv[argc] = (int)kbuf + offt;
+ argc++;
+ }
+ ptr = strchr(ptr + 1, ' ');
+ }
+ if (argc > 1) {
+ /* Copy to kernel space */
+ copy_from_user(kbuf, (char *)image->segment[i].buf, image->segment[i].bufsz);
+ fw_arg0 = kexec_args[0] = argc;
+ }
+ break;
+ }
+ }
+
+ pr_info("argc = %lu\n", kexec_args[0]);
+ for (i = 0; i < kexec_args[0]; i++)
+ pr_info("argv[%d] = %p, %s\n", i, (char *)kexec_argv[i], (char *)kexec_argv[i]);
+
+ return 0;
+}
+
int
machine_kexec_prepare(struct kimage *kimage)
{
@@ -47,6 +98,7 @@ machine_kexec_prepare(struct kimage *kim
* This can be overridden by _machine_kexec_prepare().
*/
machine_kexec_init_args();
+ machine_kexec_pass_args(kimage);
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);


@@ -1,83 +0,0 @@
From 4aded085fa0057a9a1e1dcec631f950307360c1f Mon Sep 17 00:00:00 2001
From: Wu Zhangjin <wuzhangjin@gmail.com>
Date: Tue, 11 Jan 2011 13:46:19 +0000
Subject: MIPS: Fix compiling failure of relocate_kernel.S
The following error is fixed with the help of <asm/asm_nosec.h>. Since
this file needs to put different symbols in the same section, the
original LEAF, NESTED and EXPORT macros (without explicit section
indication) must be used; <asm/asm_nosec.h> provides them.
arch/mips/kernel/relocate_kernel.S: Assembler messages:
arch/mips/kernel/relocate_kernel.S:162: Error: operation combines symbols in different segments
Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
---
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -7,6 +7,7 @@
*/
#include <asm/asm.h>
+#include <asm/asm_nosec.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/page.h>
--- /dev/null
+++ b/arch/mips/include/asm/asm_nosec.h
@@ -0,0 +1,53 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2010 Wu Zhangjin <wuzhangjin@gmail.com>
+ *
+ * Derive from <asm/asm.h>
+ *
+ * Override the macros without -ffunction-sections and -fdata-sections support.
+ * If several functions or data must be put in the same section, please include
+ * this header file after the <asm/asm.h> to override the generic definition.
+ */
+
+#ifndef __ASM_ASM_NOSEC_H
+#define __ASM_ASM_NOSEC_H
+
+#undef LEAF
+#undef NESTED
+#undef EXPORT
+
+/*
+ * LEAF - declare leaf routine
+ */
+#define LEAF(symbol) \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
+symbol: .frame sp, 0, ra
+
+/*
+ * NESTED - declare nested routine entry point
+ */
+#define NESTED(symbol, framesize, rpc) \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
+symbol: .frame sp, framesize, rpc
+
+/*
+ * EXPORT - export definition of symbol
+ */
+#define EXPORT(symbol) \
+ .globl symbol; \
+symbol:
+
+#endif /* __ASM_ASM_NOSEC_H */


@@ -1,186 +0,0 @@
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -25,67 +25,104 @@ void (*relocated_kexec_smp_wait) (void *
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
-static void machine_kexec_init_args(void)
+#define KEXEC_MIPS_ARGV_BUF_SIZE COMMAND_LINE_SIZE
+#define KEXEC_MIPS_ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
+
+char kexec_argv_buf[KEXEC_MIPS_ARGV_BUF_SIZE] __kexec;
+char *kexec_argv[KEXEC_MIPS_ARGV_MAX_ARGS] __kexec;
+
+static void
+machine_kexec_print_args(void)
{
- kexec_args[0] = fw_arg0;
- kexec_args[1] = fw_arg1;
- kexec_args[2] = fw_arg2;
- kexec_args[3] = fw_arg3;
+ int i;
pr_info("kexec_args[0] (argc): %lu\n", kexec_args[0]);
pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
-}
-#define ARGV_MAX_ARGS (COMMAND_LINE_SIZE / 15)
+ for (i = 0; i < kexec_args[0]; i++)
+ pr_info("kexec_argv[%d] = %p, %s\n", i,
+ (char *)kexec_argv[i], (char *)kexec_argv[i]);
+}
-int machine_kexec_pass_args(struct kimage *image)
+static void
+machine_kexec_init_argv(struct kimage *image)
{
- int i, argc = 0;
- char *bootloader = "kexec";
- int *kexec_argv = (int *)kexec_args[1];
+ void __user *buf = NULL;
+ size_t bufsz;
+ size_t size;
+ int i;
+ bufsz = 0;
for (i = 0; i < image->nr_segments; i++) {
- if (!strncmp(bootloader, (char *)image->segment[i].buf,
- strlen(bootloader))) {
- /*
- * convert command line string to array
- * of parameters (as bootloader does).
- */
- /*
- * Note: we do treat the 1st string "kexec" as an
- * argument ;-) so, argc here is 1.
- */
- char *str = (char *)image->segment[i].buf;
- char *ptr = strchr(str, ' ');
- char *kbuf = (char *)kexec_argv[0];
- /* Whenever --command-line or --append used, "kexec" is copied */
- argc = 1;
- /* Parse the offset */
- while (ptr && (ARGV_MAX_ARGS > argc)) {
- *ptr = '\0';
- if (ptr[1] != ' ' && ptr[1] != '\0') {
- int offt = (int)(ptr - str + 1);
- kexec_argv[argc] = (int)kbuf + offt;
- argc++;
- }
- ptr = strchr(ptr + 1, ' ');
- }
- if (argc > 1) {
- /* Copy to kernel space */
- copy_from_user(kbuf, (char *)image->segment[i].buf, image->segment[i].bufsz);
- fw_arg0 = kexec_args[0] = argc;
- }
- break;
+ struct kexec_segment *seg;
+
+ seg = &image->segment[i];
+ if (seg->bufsz < 6)
+ continue;
+
+ if (strncmp((char *) seg->buf, "kexec", 5))
+ continue;
+
+ /* don't copy "kexec" */
+ buf = seg->buf + 5;
+ bufsz = seg->bufsz - 5;
+ break;
+ }
+
+ if (!buf)
+ return;
+
+ size = KEXEC_MIPS_ARGV_BUF_SIZE - 1;
+ size = min(size, bufsz);
+ if (size < bufsz)
+ pr_warn("kexec command line truncated to %zd bytes\n", size);
+
+ /* Copy to kernel space */
+ copy_from_user(kexec_argv_buf, buf, size);
+}
+
+static void
+machine_kexec_parse_argv(struct kimage *image)
+{
+ char *reboot_code_buffer;
+ int reloc_delta;
+ char *ptr;
+ int argc;
+ int i;
+
+ ptr = kexec_argv_buf;
+ argc = 0;
+
+ /*
+ * convert command line string to array of parameters
+ * (as bootloader does).
+ */
+ while (ptr && *ptr && (KEXEC_MIPS_ARGV_MAX_ARGS > argc)) {
+ if (*ptr == ' ') {
+ *ptr++ = '\0';
+ continue;
}
+
+ kexec_argv[argc++] = ptr;
+ ptr = strchr(ptr, ' ');
}
- pr_info("argc = %lu\n", kexec_args[0]);
- for (i = 0; i < kexec_args[0]; i++)
- pr_info("argv[%d] = %p, %s\n", i, (char *)kexec_argv[i], (char *)kexec_argv[i]);
+ if (!argc)
+ return;
- return 0;
+ kexec_args[0] = argc;
+ kexec_args[1] = (unsigned long)kexec_argv;
+ kexec_args[2] = 0;
+ kexec_args[3] = 0;
+
+ reboot_code_buffer = page_address(image->control_code_page);
+ reloc_delta = reboot_code_buffer - (char *) &__start___kexec_relocate;
+
+ kexec_args[1] += reloc_delta;
+ for (i = 0; i < argc; i++)
+ kexec_argv[i] += reloc_delta;
}
int
@@ -97,8 +134,14 @@ machine_kexec_prepare(struct kimage *kim
*
* This can be overridden by _machine_kexec_prepare().
*/
- machine_kexec_init_args();
- machine_kexec_pass_args(kimage);
+
+ kexec_args[0] = fw_arg0;
+ kexec_args[1] = fw_arg1;
+ kexec_args[2] = fw_arg2;
+ kexec_args[3] = fw_arg3;
+
+ machine_kexec_init_argv(kimage);
+ machine_kexec_parse_argv(kimage);
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);
@@ -154,11 +197,13 @@ machine_kexec(struct kimage *image)
pr_info("kexec_indirection_page = %p\n",
(void *)kexec_indirection_page);
+ pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
+ &__start___kexec_relocate, (void *)reboot_code_buffer);
+
memcpy((void *)reboot_code_buffer, &__start___kexec_relocate,
kexec_relocate_size);
- pr_info("Copy kexec_relocate section from %p to reboot_code_buffer: %p\n",
- &__start___kexec_relocate, (void *)reboot_code_buffer);
+ machine_kexec_print_args();
/*
* The generic kexec code builds a page list with physical


@@ -1,20 +0,0 @@
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2322,12 +2322,15 @@ static void dynamic_debug_remove(struct
void * __weak module_alloc(unsigned long size)
{
- return size == 0 ? NULL : vmalloc_exec(size);
+ return vmalloc_exec(size);
}
static void *module_alloc_update_bounds(unsigned long size)
{
- void *ret = module_alloc(size);
+ void *ret = NULL;
+
+ if (size)
+ ret = module_alloc(size);
if (ret) {
mutex_lock(&module_mutex);


@@ -1,327 +0,0 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -23,6 +23,14 @@ config MTD_TESTS
WARNING: some of the tests will ERASE entire MTD device which they
test. Do not use these tests unless you really know what you do.
+config MTD_ROOTFS_ROOT_DEV
+ bool "Automatically set 'rootfs' partition to be root filesystem"
+ default y
+
+config MTD_ROOTFS_SPLIT
+ bool "Automatically split 'rootfs' partition for squashfs"
+ default y
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
---help---
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,6 +29,8 @@
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/root_dev.h>
+#include <linux/magic.h>
#include <linux/err.h>
#include "mtdcore.h"
@@ -50,7 +52,7 @@ struct mtd_part {
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
-
+#define IS_PART(mtd) (mtd->read == part_read)
/*
* MTD methods which simply translate the effective address and pass through
@@ -643,6 +645,155 @@ int mtd_del_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+#define ROOTFS_SPLIT_NAME "rootfs_data"
+#define ROOTFS_REMOVED_NAME "<removed>"
+
+struct squashfs_super_block {
+ __le32 s_magic;
+ __le32 pad0[9];
+ __le64 bytes_used;
+};
+
+
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+ struct squashfs_super_block sb;
+ int len, ret;
+
+ ret = master->read(master, offset, sizeof(sb), &len, (void *) &sb);
+ if (ret || (len != sizeof(sb))) {
+ printk(KERN_ALERT "split_squashfs: error occurred while reading "
+ "from \"%s\"\n", master->name);
+ return -EINVAL;
+ }
+
+ if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) {
+ printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ if (le64_to_cpu((sb.bytes_used)) <= 0) {
+ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ len = (u32) le64_to_cpu(sb.bytes_used);
+ len += (offset & 0x000fffff);
+ len += (master->erasesize - 1);
+ len &= ~(master->erasesize - 1);
+ len -= (offset & 0x000fffff);
+ *split_offset = offset + len;
+
+ return 0;
+}
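+ /* Worked example (illustrative, not from the original patch): with
+ * offset = 0x120000, erasesize = 0x10000 (64 KiB) and
+ * sb.bytes_used = 0x2d4c00:
+ * len = 0x2d4c00 + (0x120000 & 0x000fffff) = 0x2f4c00
+ * len rounded up to a multiple of erasesize = 0x300000
+ * len -= (0x120000 & 0x000fffff) -> len = 0x2e0000
+ * *split_offset = 0x120000 + 0x2e0000 = 0x400000, erase-block aligned
+ */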
+
+static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, const struct mtd_partition *part)
+{
+ struct mtd_partition *dpart;
+ struct mtd_part *slave = NULL;
+ struct mtd_part *spart;
+ int ret, split_offset = 0;
+
+ spart = PART(rpart);
+ ret = split_squashfs(master, spart->offset, &split_offset);
+ if (ret)
+ return ret;
+
+ if (split_offset <= 0)
+ return 0;
+
+ dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
+ if (dpart == NULL) {
+ printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
+ ROOTFS_SPLIT_NAME);
+ return -ENOMEM;
+ }
+
+ memcpy(dpart, part, sizeof(*part));
+ dpart->name = (unsigned char *)&dpart[1];
+ strcpy(dpart->name, ROOTFS_SPLIT_NAME);
+
+ dpart->size = rpart->size - (split_offset - spart->offset);
+ dpart->offset = split_offset;
+
+ if (dpart == NULL)
+ return 1;
+
+ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%llX, len=%llX \n",
+ ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
+
+ slave = allocate_partition(master, dpart, 0, split_offset);
+ if (IS_ERR(slave))
+ return PTR_ERR(slave);
+ mutex_lock(&mtd_partitions_mutex);
+ list_add(&slave->list, &mtd_partitions);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(&slave->mtd);
+
+ rpart->split = &slave->mtd;
+
+ return 0;
+}
+
+static int refresh_rootfs_split(struct mtd_info *mtd)
+{
+ struct mtd_partition tpart;
+ struct mtd_part *part;
+ char *name;
+ //int index = 0;
+ int offset, size;
+ int ret;
+
+ part = PART(mtd);
+
+ /* check for the new squashfs offset first */
+ ret = split_squashfs(part->master, part->offset, &offset);
+ if (ret)
+ return ret;
+
+ if ((offset > 0) && !mtd->split) {
+ printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
+ /* if we don't have a rootfs split partition, create a new one */
+ tpart.name = (char *) mtd->name;
+ tpart.size = mtd->size;
+ tpart.offset = part->offset;
+
+ return split_rootfs_data(part->master, &part->mtd, &tpart);
+ } else if ((offset > 0) && mtd->split) {
+ /* update the offsets of the existing partition */
+ size = mtd->size + part->offset - offset;
+
+ part = PART(mtd->split);
+ part->offset = offset;
+ part->mtd.size = size;
+ printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
+ __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
+ (u32) part->offset, (u32) part->mtd.size);
+ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
+ strcpy(name, ROOTFS_SPLIT_NAME);
+ part->mtd.name = name;
+ } else if ((offset <= 0) && mtd->split) {
+ printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
+
+ /* mark existing partition as removed */
+ part = PART(mtd->split);
+ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
+ strcpy(name, ROOTFS_REMOVED_NAME);
+ part->mtd.name = name;
+ part->offset = 0;
+ part->mtd.size = 0;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MTD_ROOTFS_SPLIT */
+
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -659,6 +810,9 @@ int add_mtd_partitions(struct mtd_info *
struct mtd_part *slave;
uint64_t cur_offset = 0;
int i;
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ int ret;
+#endif
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
@@ -673,12 +827,53 @@ int add_mtd_partitions(struct mtd_info *
add_mtd_device(&slave->mtd);
+ if (!strcmp(parts[i].name, "rootfs")) {
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+ if (ROOT_DEV == 0) {
+ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
+ "set to be root filesystem\n");
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
+ }
+#endif
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ ret = split_rootfs_data(master, &slave->mtd, &parts[i]);
+ /* if (ret == 0)
+ * j++; */
+#endif
+ }
+
cur_offset = slave->offset + slave->mtd.size;
}
return 0;
}
+int mtd_device_refresh(struct mtd_info *mtd)
+{
+ int ret = 0;
+
+ if (IS_PART(mtd)) {
+ struct mtd_part *part;
+ struct mtd_info *master;
+
+ part = PART(mtd);
+ master = part->master;
+ if (master->refresh_device)
+ ret = master->refresh_device(master);
+ }
+
+ if (!ret && mtd->refresh_device)
+ ret = mtd->refresh_device(mtd);
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
+ refresh_rootfs_split(mtd);
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtd_device_refresh);
+
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1005,6 +1005,12 @@ static int mtdchar_ioctl(struct file *fi
break;
}
+ case MTDREFRESH:
+ {
+ ret = mtd_device_refresh(mtd);
+ break;
+ }
+
default:
ret = -ENOTTY;
}
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -114,6 +114,7 @@ struct nand_ecclayout {
struct module; /* only needed for owner field in mtd_info */
+struct mtd_info;
struct mtd_info {
u_char type;
uint32_t flags;
@@ -214,6 +215,9 @@ struct mtd_info {
int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
int (*suspend) (struct mtd_info *mtd);
void (*resume) (struct mtd_info *mtd);
+ int (*refresh_device)(struct mtd_info *mtd);
+ struct mtd_info *split;
+
/*
* If the driver is something smart, like UBI, it may need to maintain
* its own reference counting. The below functions are only for driver.
@@ -502,6 +506,7 @@ extern int mtd_device_parse_register(str
int defnr_parts);
#define mtd_device_register(master, parts, nr_parts) \
mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
+extern int mtd_device_refresh(struct mtd_info *master);
extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,12 +36,14 @@
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
+struct mtd_partition;
struct mtd_partition {
char *name; /* identifier string */
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
+ int (*refresh_partition)(struct mtd_info *);
};
#define MTDPART_OFS_RETAIN (-3)
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -202,6 +202,7 @@ struct otp_info {
* without OOB, e.g., NOR flash.
*/
#define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
+#define MTDREFRESH _IO('M', 50)
/*
* Obsolete legacy interface. Keep it in order not to break userspace
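
A hedged userspace sketch of exercising the new ioctl (the device path is
hypothetical; MTDREFRESH comes from the mtd-abi.h hunk above):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-abi.h>

	int main(void)
	{
		int fd = open("/dev/mtd0", O_RDWR);	/* hypothetical device */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* ask the driver (e.g. block2mtd) to re-detect its layout */
		if (ioctl(fd, MTDREFRESH) < 0)
			perror("MTDREFRESH");
		close(fd);
		return 0;
	}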


@@ -1,146 +0,0 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -35,6 +35,8 @@
#include "mtdcore.h"
+#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -252,13 +254,61 @@ static int part_erase(struct mtd_info *m
return -EROFS;
if (instr->addr >= mtd->size)
return -EINVAL;
+
+ instr->partial_start = false;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ size_t readlen = 0;
+ u64 mtd_ofs;
+
+ instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
+ if (!instr->erase_buf)
+ return -ENOMEM;
+
+ mtd_ofs = part->offset + instr->addr;
+ instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->addr -= instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ instr->addr + part->offset,
+ part->master->erasesize,
+ &readlen, instr->erase_buf);
+
+ instr->len += instr->erase_buf_ofs;
+ instr->partial_start = true;
+ } else {
+ mtd_ofs = part->offset + part->mtd.size;
+ instr->erase_buf_ofs = part->master->erasesize -
+ do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->len += instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ part->offset + instr->addr +
+ instr->len - part->master->erasesize,
+ part->master->erasesize, &readlen,
+ instr->erase_buf);
+ } else {
+ ret = 0;
+ }
+ }
+ if (ret < 0) {
+ kfree(instr->erase_buf);
+ return ret;
+ }
+
+ }
+
instr->addr += part->offset;
ret = mtd_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL)
+ kfree(instr->erase_buf);
}
+
return ret;
}
@@ -266,7 +316,25 @@ void mtd_erase_callback(struct erase_inf
{
if (instr->mtd->erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
+ size_t wrlen = 0;
+ if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
+ if (instr->partial_start) {
+ part->master->write(part->master,
+ instr->addr, instr->erase_buf_ofs,
+ &wrlen, instr->erase_buf);
+ instr->addr += instr->erase_buf_ofs;
+ } else {
+ instr->len -= instr->erase_buf_ofs;
+ part->master->write(part->master,
+ instr->addr + instr->len,
+ instr->erase_buf_ofs, &wrlen,
+ instr->erase_buf +
+ part->master->erasesize -
+ instr->erase_buf_ofs);
+ }
+ kfree(instr->erase_buf);
+ }
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
@@ -537,18 +605,24 @@ static struct mtd_part *allocate_partiti
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, &slave->mtd)) {
/* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of
- * _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- part->name);
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+ if (((u32) slave->mtd.size) > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- part->name);
+ mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+
+ if ((u32) slave->mtd.size > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
+ if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
+ printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
+ part->name);
slave->mtd.ecclayout = master->ecclayout;
if (master->block_isbad) {
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -58,6 +58,10 @@ struct erase_info {
u_long priv;
u_char state;
struct erase_info *next;
+
+ u8 *erase_buf;
+ u32 erase_buf_ofs;
+ bool partial_start;
};
struct mtd_erase_region_info {


@@ -1,18 +0,0 @@
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -35,6 +35,7 @@
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
+struct mtd_info;
struct mtd_partition;
struct mtd_partition {
@@ -52,7 +53,6 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
-struct mtd_info;
struct device_node;
/**


@@ -1,30 +0,0 @@
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -267,14 +267,21 @@ static int parse_redboot_partitions(stru
#endif
names += strlen(names)+1;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
- i++;
- parts[i].offset = parts[i-1].size + parts[i-1].offset;
- parts[i].size = fl->next->img->flash_base - parts[i].offset;
- parts[i].name = nullname;
- }
+ if (!strcmp(parts[i].name, "rootfs")) {
+ parts[i].size = fl->next->img->flash_base;
+ parts[i].size &= ~(master->erasesize - 1);
+ parts[i].size -= parts[i].offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ nrparts--;
+ } else {
+ i++;
+ parts[i].offset = parts[i-1].size + parts[i-1].offset;
+ parts[i].size = fl->next->img->flash_base - parts[i].offset;
+ parts[i].name = nullname;
#endif
+ }
+ }
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);


@@ -1,35 +0,0 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -156,6 +156,22 @@ config MTD_BCM63XX_PARTS
This provides partitions parsing for BCM63xx devices with CFE
bootloaders.
+config MTD_MYLOADER_PARTS
+ tristate "MyLoader partition parsing"
+ depends on ADM5120 || ATHEROS_AR231X || ATHEROS_AR71XX || ATH79
+ ---help---
+ MyLoader is a bootloader which allows the user to define partitions
+ in flash devices, by putting a table in the second erase block
+ on the device, similar to a partition table. This table gives the
+ offsets and lengths of the user defined partitions.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically.
+
comment "User Modules And Translation Layers"
config MTD_CHAR
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdli
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
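
The help text above notes that a driver must still call the parsing
functions; here is a minimal sketch of how a partition parser plugs into
the MTD core of this era (the names and the empty body are placeholders,
not the real myloader.c):

	#include <linux/module.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	static int myloader_parse(struct mtd_info *master,
				  struct mtd_partition **pparts,
				  struct mtd_part_parser_data *data)
	{
		/* read the table from the second erase block, allocate and
		 * fill *pparts, return the number of partitions found */
		return 0;
	}

	static struct mtd_part_parser myloader_parser = {
		.owner    = THIS_MODULE,
		.name     = "MyLoader",
		.parse_fn = myloader_parse,
	};

	static int __init myloader_parser_init(void)
	{
		return register_mtd_parser(&myloader_parser);
	}
	module_init(myloader_parser_init);
	MODULE_LICENSE("GPL");

A flash driver then lists "MyLoader" in the probe types it passes to
mtd_device_parse_register().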


@@ -1,116 +0,0 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
@@ -231,11 +232,12 @@ static void block2mtd_free_device(struct
/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
+static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
{
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
struct block_device *bdev;
struct block2mtd_dev *dev;
+ struct mtd_partition *part;
char *name;
if (!devname)
@@ -274,13 +276,16 @@ static struct block2mtd_dev *add_device(
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
+ if (!mtdname)
+ mtdname = devname;
+ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
if (!name)
goto devinit_err;
+ strcpy(name, mtdname);
dev->mtd.name = name;
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
@@ -294,14 +299,17 @@ static struct block2mtd_dev *add_device(
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
- if (mtd_device_register(&dev->mtd, NULL, 0)) {
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = name;
+ part->offset = 0;
+ part->size = dev->mtd.size;
+ if (mtd_device_register(&dev->mtd, part, 1)) {
/* Device didn't get added, so free the entry */
goto devinit_err;
}
list_add(&dev->list, &blkmtd_device_list);
INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
devinit_err:
@@ -374,9 +382,9 @@ static char block2mtd_paramline[80 + 12]
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
char *str = buf;
- char *token[2];
+ char *token[3];
char *name;
size_t erase_size = PAGE_SIZE;
int i, ret;
@@ -387,7 +395,7 @@ static int block2mtd_setup2(const char *
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str)
@@ -406,8 +414,10 @@ static int block2mtd_setup2(const char *
parse_err("illegal erase size");
}
}
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ parse_err("mtd device name too long");
- add_device(name, erase_size);
+ add_device(name, erase_size, token[2]);
return 0;
}
@@ -441,7 +451,7 @@ static int block2mtd_setup(const char *v
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
static int __init block2mtd_init(void)
{
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -546,6 +546,7 @@ int invalidate_partitions(struct gendisk
return 0;
}
+EXPORT_SYMBOL(rescan_partitions);
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
{
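
A hedged usage example of the extended module parameter format (device and
name are made up):

	modprobe block2mtd block2mtd=/dev/mmcblk0p2,65536,rootfs_data

This creates an MTD device named rootfs_data, backed by /dev/mmcblk0p2,
with a 64 KiB erase size and a single partition spanning the whole device.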


@@ -1,291 +0,0 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -29,6 +29,8 @@ struct block2mtd_dev {
struct block_device *blkdev;
struct mtd_info mtd;
struct mutex write_mutex;
+ rwlock_t bdev_mutex;
+ char devname[0];
};
@@ -81,6 +83,12 @@ static int block2mtd_erase(struct mtd_in
size_t len = instr->len;
int err;
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }
+
instr->state = MTD_ERASING;
mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
@@ -92,6 +100,10 @@ static int block2mtd_erase(struct mtd_in
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
+
+done:
+ read_unlock(&dev->bdev_mutex);
+
return err;
}
@@ -103,10 +115,14 @@ static int block2mtd_read(struct mtd_inf
struct page *page;
int index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
- int cpylen;
+ int cpylen, err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev || (from > mtd->size)) {
+ err = -EINVAL;
+ goto done;
+ }
- if (from > mtd->size)
- return -EINVAL;
if (from + len > mtd->size)
len = mtd->size - from;
@@ -121,10 +137,14 @@ static int block2mtd_read(struct mtd_inf
len = len - cpylen;
page = page_read(dev->blkdev->bd_inode->i_mapping, index);
- if (!page)
- return -ENOMEM;
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (!page) {
+ err = -ENOMEM;
+ goto done;
+ }
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto done;
+ }
memcpy(buf, page_address(page) + offset, cpylen);
page_cache_release(page);
@@ -135,7 +155,10 @@ static int block2mtd_read(struct mtd_inf
offset = 0;
index++;
}
- return 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
+ return err;
}
@@ -187,12 +210,22 @@ static int block2mtd_write(struct mtd_in
size_t *retlen, const u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- int err;
+ int err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }
if (!len)
- return 0;
- if (to >= mtd->size)
- return -ENOSPC;
+ goto done;
+
+ if (to >= mtd->size) {
+ err = -ENOSPC;
+ goto done;
+ }
+
if (to + len > mtd->size)
len = mtd->size - to;
@@ -201,6 +234,9 @@ static int block2mtd_write(struct mtd_in
mutex_unlock(&dev->write_mutex);
if (err > 0)
err = 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
return err;
}
@@ -209,33 +245,110 @@ static int block2mtd_write(struct mtd_in
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
+ read_lock(&dev->bdev_mutex);
+ if (dev->blkdev)
sync_blockdev(dev->blkdev);
+ read_unlock(&dev->bdev_mutex);
+
return;
}
+static int _open_bdev(struct block2mtd_dev *dev)
+{
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
+ struct block_device *bdev;
+
+ /* Get a handle on the device */
+ bdev = blkdev_get_by_path(dev->devname, mode, dev);
+#ifndef MODULE
+ if (IS_ERR(bdev)) {
+ dev_t devt;
+
+ /* We might not have rootfs mounted at this point. Try
+ to resolve the device name by other means. */
+
+ devt = name_to_dev_t(dev->devname);
+ if (devt)
+ bdev = blkdev_get_by_dev(devt, mode, dev);
+ }
+#endif
+
+ if (IS_ERR(bdev)) {
+ ERROR("error: cannot open device %s", dev->devname);
+ return 1;
+ }
+ dev->blkdev = bdev;
+
+ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+ ERROR("attempting to use an MTD device as a block device");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void _close_bdev(struct block2mtd_dev *dev)
+{
+ struct block_device *bdev;
+
+ if (!dev->blkdev)
+ return;
+
+ bdev = dev->blkdev;
+ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
+ blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ dev->blkdev = NULL;
+}
+
static void block2mtd_free_device(struct block2mtd_dev *dev)
{
if (!dev)
return;
kfree(dev->mtd.name);
-
- if (dev->blkdev) {
- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
- 0, -1);
- blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
- }
-
+ _close_bdev(dev);
kfree(dev);
}
-/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname)
+static int block2mtd_refresh(struct mtd_info *mtd)
{
- const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
+ struct block2mtd_dev *dev = mtd->priv;
struct block_device *bdev;
+ dev_t devt;
+ int err = 0;
+
+ /* no other mtd function can run at this point */
+ write_lock(&dev->bdev_mutex);
+
+ /* get the device number for the whole disk */
+ devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
+
+ /* close the old block device */
+ _close_bdev(dev);
+
+ /* open the whole disk and issue a partition rescan */
+ bdev = blkdev_get_by_dev(devt, FMODE_WRITE | FMODE_READ, mtd);
+ if (!bdev || !bdev->bd_disk)
+ err = -EINVAL;
+#ifndef CONFIG_MTD_BLOCK2MTD_MODULE
+ else
+ err = rescan_partitions(bdev->bd_disk, bdev);
+#endif
+ if (bdev)
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+
+ /* try to open the partition block device again */
+ _open_bdev(dev);
+ write_unlock(&dev->bdev_mutex);
+
+ return err;
+}
+
+/* FIXME: ensure that mtd->size % erase_size == 0 */
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+{
struct block2mtd_dev *dev;
struct mtd_partition *part;
char *name;
@@ -243,36 +356,17 @@ static struct block2mtd_dev *add_device(
if (!devname)
return NULL;
- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
if (!dev)
return NULL;
- /* Get a handle on the device */
- bdev = blkdev_get_by_path(devname, mode, dev);
-#ifndef MODULE
- if (IS_ERR(bdev)) {
-
- /* We might not have rootfs mounted at this point. Try
- to resolve the device name by other means. */
+ strcpy(dev->devname, devname);
- dev_t devt = name_to_dev_t(devname);
- if (devt)
- bdev = blkdev_get_by_dev(devt, mode, dev);
- }
-#endif
-
- if (IS_ERR(bdev)) {
- ERROR("error: cannot open device %s", devname);
+ if (_open_bdev(dev))
goto devinit_err;
- }
- dev->blkdev = bdev;
-
- if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
- ERROR("attempting to use an MTD device as a block device");
- goto devinit_err;
- }
mutex_init(&dev->write_mutex);
+ rwlock_init(&dev->bdev_mutex);
/* Setup the MTD structure */
/* make the name contain the block device in */
@@ -298,6 +392,7 @@ static struct block2mtd_dev *add_device(
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
+ dev->mtd.refresh_device = block2mtd_refresh;
part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
part->name = name;


@@ -1,10 +0,0 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -268,6 +268,7 @@ static int _open_bdev(struct block2mtd_d
/* We might not have rootfs mounted at this point. Try
to resolve the device name by other means. */
+ wait_for_device_probe();
devt = name_to_dev_t(dev->devname);
if (devt)
bdev = blkdev_get_by_dev(devt, mode, dev);


@@ -1,10 +0,0 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -388,7 +388,6 @@ static struct block2mtd_dev *add_device(
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
dev->mtd.write = block2mtd_write;
- dev->mtd.writev = mtd_writev;
dev->mtd.sync = block2mtd_sync;
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;


@@ -1,37 +0,0 @@
---
drivers/mtd/nand/plat_nand.c | 13 ++++++++++++-
include/linux/mtd/nand.h | 1 +
2 files changed, 13 insertions(+), 1 deletion(-)
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -622,6 +622,7 @@ struct platform_nand_chip {
unsigned int options;
unsigned int bbt_options;
const char **part_probe_types;
+ int (*chip_fixup)(struct mtd_info *mtd);
};
/* Keep gcc happy */
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -93,7 +93,18 @@ static int __devinit plat_nand_probe(str
}
/* Scan to find existence of the device */
- if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
+ if (nand_scan_ident(&data->mtd, pdata->chip.nr_chips, NULL)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (pdata->chip.chip_fixup) {
+ err = pdata->chip.chip_fixup(&data->mtd);
+ if (err)
+ goto out;
+ }
+
+ if (nand_scan_tail(&data->mtd)) {
err = -ENXIO;
goto out;
}
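
A sketch of what the new hook enables on the board side (everything below
is an assumption for illustration, not code from the patch):

	/* hypothetical board support code */
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/nand.h>

	static int board_nand_fixup(struct mtd_info *mtd)
	{
		struct nand_chip *chip = mtd->priv;

		/* example: clamp the usable size below what the chip reports */
		if (mtd->size > (32 << 20))
			mtd->size = 32 << 20;

		(void)chip;	/* a real fixup would adjust chip/ECC settings */
		return 0;
	}

	static struct platform_nand_data board_nand_data = {
		.chip = {
			.nr_chips   = 1,
			.chip_fixup = board_nand_fixup,
		},
		/* .ctrl = { ... } omitted */
	};

The probe path above invokes chip_fixup between nand_scan_ident() and
nand_scan_tail(), i.e. after the chip is identified but before the ECC
layout is finalized.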


@@ -1,12 +0,0 @@
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -507,8 +507,7 @@ int __nand_correct_data(unsigned char *b
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
return 1; /* error in ECC data; no action needed */
- printk(KERN_ERR "uncorrectable error : ");
- return -1;
+ return -EBADMSG;
}
EXPORT_SYMBOL(__nand_correct_data);


@@ -1,11 +0,0 @@
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -682,7 +682,7 @@ static int get_chip(struct map_info *map
return 0;
case FL_ERASING:
- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;


@@ -1,39 +0,0 @@
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -45,6 +45,7 @@
#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
+#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips*/
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
@@ -625,6 +626,7 @@ struct flash_info {
u16 flags;
#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
#define M25P_NO_ERASE 0x02 /* No erase command needed */
+#define SECT_4K_PMC 0x04 /* OPCODE_BE_4K_PMC works uniformly */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -686,6 +688,10 @@ static const struct spi_device_id m25p_i
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ /* PMC -- pm25x "blocks" are 32K, sectors are 4K */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
@@ -921,6 +927,9 @@ static int __devinit m25p_probe(struct s
if (info->flags & SECT_4K) {
flash->erase_opcode = OPCODE_BE_4K;
flash->mtd.erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ flash->erase_opcode = OPCODE_BE_4K_PMC;
+ flash->mtd.erasesize = 4096;
} else {
flash->erase_opcode = OPCODE_SE;
flash->mtd.erasesize = info->sector_size;


@@ -1,10 +0,0 @@
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -765,6 +765,7 @@ static const struct spi_device_id m25p_i
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },


@@ -1,18 +0,0 @@
From: George Kashperko <george@znau.edu.ua>
Issue map read after Write Buffer Load command to ensure chip is ready
to receive data.
Signed-off-by: George Kashperko <george@znau.edu.ua>
---
drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1409,6 +1409,7 @@ static int __xipram do_write_buffer(stru
/* Write Buffer Load */
map_write(map, CMD(0x25), cmd_adr);
+ (void) map_read(map, cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;


@@ -1,41 +0,0 @@
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -102,6 +102,14 @@ config M25PXX_USE_FAST_READ
help
This option enables FAST_READ access supported by ST M25Pxx.
+config M25PXX_PREFER_SMALL_SECTOR_ERASE
+ bool "Prefer small sector erase"
+ depends on MTD_M25P80
+ default y
+ help
+ This option enables use of the small erase sectors if that is
+ supported by the flash chip.
+
config MTD_SST25L
tristate "Support SST25L (non JEDEC) SPI Flash chips"
depends on SPI_MASTER
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -84,6 +84,12 @@
#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+#ifdef CONFIG_M25PXX_PREFER_SMALL_SECTOR_ERASE
+#define PREFER_SMALL_SECTOR_ERASE 1
+#else
+#define PREFER_SMALL_SECTOR_ERASE 0
+#endif
+
/****************************************************************************/
struct m25p {
@@ -925,7 +931,7 @@ static int __devinit m25p_probe(struct s
flash->mtd.write = m25p80_write;
/* prefer "small sector" erase if possible */
- if (info->flags & SECT_4K) {
+ if (PREFER_SMALL_SECTOR_ERASE && (info->flags & SECT_4K)) {
flash->erase_opcode = OPCODE_BE_4K;
flash->mtd.erasesize = 4096;
} else if (info->flags & SECT_4K_PMC) {


@@ -1,10 +0,0 @@
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -677,6 +677,7 @@ static const struct spi_device_id m25p_i
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },


@@ -1,18 +0,0 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -357,7 +357,14 @@ static int part_unlock(struct mtd_info *
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return mtd_unlock(part->master, ofs + part->offset, len);
+
+ ofs += part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ /* round up len to next erasesize and round down offset to prev block */
+ len = (mtd_div_by_eb(len, part->master) + 1) * part->master->erasesize;
+ ofs &= ~(part->master->erasesize - 1);
+ }
+ return mtd_unlock(part->master, ofs, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)


@@ -1,25 +0,0 @@
From f31b7c0efa255dd17a5f584022a319387f09b0d8 Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jonas.gorski@gmail.com>
Date: Tue, 12 Apr 2011 19:55:41 +0200
Subject: [PATCH] squashfs: update xz compressor options struct.
Update the xz compressor options struct to match the squashfs userspace
one.
---
fs/squashfs/xz_wrapper.c | 4 +++-
1 files changed, 3 insertions(+), 1 deletions(-)
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -39,8 +39,10 @@ struct squashfs_xz {
};
struct comp_opts {
- __le32 dictionary_size;
__le32 flags;
+ __le16 bit_opts;
+ __le16 fb;
+ __le32 dictionary_size;
};
static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,

Some files were not shown because too many files have changed in this diff.