kernel: generic: add kernel 4.3

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>

SVN-Revision: 47182
Hauke Mehrtens 2015-10-11 19:08:08 +00:00
parent 3d1a65adb4
commit b5842aed82
180 changed files with 30055 additions and 0 deletions

View file

@ -5,10 +5,12 @@ LINUX_RELEASE?=1
LINUX_VERSION-3.18 = .21
LINUX_VERSION-4.0 = .9
LINUX_VERSION-4.1 = .10
+LINUX_VERSION-4.3 = -rc5
LINUX_KERNEL_MD5SUM-3.18.21 = e4248caaa4cef318c04657e971b37298
LINUX_KERNEL_MD5SUM-4.0.9 = 40fc5f6e2d718e539b45e6601c71985b
LINUX_KERNEL_MD5SUM-4.1.10 = 16953359a8c245d478294af65fd756a8
+LINUX_KERNEL_MD5SUM-4.3-rc5 = 3f8f158a3a06c97464d891d84e5e45f3
ifdef KERNEL_PATCHVER
LINUX_VERSION:=$(KERNEL_PATCHVER)$(strip $(LINUX_VERSION-$(KERNEL_PATCHVER)))

File diff suppressed because it is too large.

View file

@ -0,0 +1,25 @@
Upstream changed the default rootfs to tmpfs when none has been passed
to the kernel - this doesn't fit our purposes, so change it back.
Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -633,6 +633,7 @@ int __init init_rootfs(void)
if (err)
return err;
+#if 0
if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
(!root_fs_names || strstr(root_fs_names, "tmpfs"))) {
err = shmem_init();
@@ -640,6 +641,9 @@ int __init init_rootfs(void)
} else {
err = init_ramfs_fs();
}
+#else
+ err = init_ramfs_fs();
+#endif
if (err)
unregister_filesystem(&rootfs_fs_type);

View file

@ -0,0 +1,505 @@
Subject: netfilter: conntrack: cache route for forwarded connections
... to avoid per-packet FIB lookup if possible.
The cached dst is re-used provided the input interface
is the same as that of the previous packet in the same direction.
If not, the cached dst is invalidated.
For ipv6 we also need to store sernum, else dst_check doesn't work,
pointed out by Eric Dumazet.
This should speed up forwarding when conntrack is already in use
anyway, especially when using reverse path filtering -- active RPF
enforces two FIB lookups for each packet.
Before the routing cache removal this didn't matter, since RPF was performed
only when the route cache didn't yield a result; but without the route cache it
comes at a higher price.
Julian Anastasov suggested adding a NETDEV_UNREGISTER handler to
avoid holding on to the dsts of 'frozen' conntracks.
Signed-off-by: Florian Westphal <fw@strlen.de>
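For orientation (not part of Florian's patch): with the Kconfig entry added below, the cache is an optional tristate, so a configuration along these lines would build it as a module:

# hypothetical .config fragment enabling the conntrack route cache
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_RTCACHE=m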
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -30,6 +30,9 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
NF_CT_EXT_SYNPROXY,
#endif
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_RTCACHE)
+ NF_CT_EXT_RTCACHE,
+#endif
NF_CT_EXT_NUM,
};
@@ -43,6 +46,7 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
+#define NF_CT_EXT_RTCACHE_TYPE struct nf_conn_rtcache
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_rtcache.h
@@ -0,0 +1,34 @@
+#include <linux/gfp.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct dst_entry;
+
+struct nf_conn_dst_cache {
+ struct dst_entry *dst;
+ int iif;
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+ u32 cookie;
+#endif
+
+};
+
+struct nf_conn_rtcache {
+ struct nf_conn_dst_cache cached_dst[IP_CT_DIR_MAX];
+};
+
+static inline
+struct nf_conn_rtcache *nf_ct_rtcache_find(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_RTCACHE)
+ return nf_ct_ext_find(ct, NF_CT_EXT_RTCACHE);
+#else
+ return NULL;
+#endif
+}
+
+static inline int nf_conn_rtcache_iif_get(const struct nf_conn_rtcache *rtc,
+ enum ip_conntrack_dir dir)
+{
+ return rtc->cached_dst[dir].iif;
+}
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -114,6 +114,18 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
+config NF_CONNTRACK_RTCACHE
+ tristate "Cache route entries in conntrack objects"
+ depends on NETFILTER_ADVANCED
+ depends on NF_CONNTRACK
+ help
+ If this option is enabled, the connection tracking code will
+ cache routing information for each connection that is being
+ forwarded, at a cost of 32 bytes per conntrack object.
+
+ To compile it as a module, choose M here. If unsure, say N.
+ The module will be called nf_conntrack_rtcache.
+
config NF_CONNTRACK_TIMEOUT
bool 'Connection tracking timeout'
depends on NETFILTER_ADVANCED
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -18,6 +18,9 @@ obj-$(CONFIG_NETFILTER_NETLINK_LOG) += n
# connection tracking
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
+# optional conntrack route cache extension
+obj-$(CONFIG_NF_CONNTRACK_RTCACHE) += nf_conntrack_rtcache.o
+
# SCTP protocol connection tracking
obj-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
--- /dev/null
+++ b/net/netfilter/nf_conntrack_rtcache.c
@@ -0,0 +1,387 @@
+/* route cache for netfilter.
+ *
+ * (C) 2014 Red Hat GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include <net/dst.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_rtcache.h>
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+#include <net/ip6_fib.h>
+#endif
+
+static void __nf_conn_rtcache_destroy(struct nf_conn_rtcache *rtc,
+ enum ip_conntrack_dir dir)
+{
+ struct dst_entry *dst = rtc->cached_dst[dir].dst;
+
+ dst_release(dst);
+}
+
+static void nf_conn_rtcache_destroy(struct nf_conn *ct)
+{
+ struct nf_conn_rtcache *rtc = nf_ct_rtcache_find(ct);
+
+ if (!rtc)
+ return;
+
+ __nf_conn_rtcache_destroy(rtc, IP_CT_DIR_ORIGINAL);
+ __nf_conn_rtcache_destroy(rtc, IP_CT_DIR_REPLY);
+}
+
+static void nf_ct_rtcache_ext_add(struct nf_conn *ct)
+{
+ struct nf_conn_rtcache *rtc;
+
+ rtc = nf_ct_ext_add(ct, NF_CT_EXT_RTCACHE, GFP_ATOMIC);
+ if (rtc) {
+ rtc->cached_dst[IP_CT_DIR_ORIGINAL].iif = -1;
+ rtc->cached_dst[IP_CT_DIR_ORIGINAL].dst = NULL;
+ rtc->cached_dst[IP_CT_DIR_REPLY].iif = -1;
+ rtc->cached_dst[IP_CT_DIR_REPLY].dst = NULL;
+ }
+}
+
+static struct nf_conn_rtcache *nf_ct_rtcache_find_usable(struct nf_conn *ct)
+{
+ if (nf_ct_is_untracked(ct))
+ return NULL;
+ return nf_ct_rtcache_find(ct);
+}
+
+static struct dst_entry *
+nf_conn_rtcache_dst_get(const struct nf_conn_rtcache *rtc,
+ enum ip_conntrack_dir dir)
+{
+ return rtc->cached_dst[dir].dst;
+}
+
+static u32 nf_rtcache_get_cookie(int pf, const struct dst_entry *dst)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+ if (pf == NFPROTO_IPV6) {
+ const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+ if (rt->rt6i_node)
+ return (u32)rt->rt6i_node->fn_sernum;
+ }
+#endif
+ return 0;
+}
+
+static void nf_conn_rtcache_dst_set(int pf,
+ struct nf_conn_rtcache *rtc,
+ struct dst_entry *dst,
+ enum ip_conntrack_dir dir, int iif)
+{
+ if (rtc->cached_dst[dir].iif != iif)
+ rtc->cached_dst[dir].iif = iif;
+
+ if (rtc->cached_dst[dir].dst != dst) {
+ struct dst_entry *old;
+
+ dst_hold(dst);
+
+ old = xchg(&rtc->cached_dst[dir].dst, dst);
+ dst_release(old);
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+ if (pf == NFPROTO_IPV6)
+ rtc->cached_dst[dir].cookie =
+ nf_rtcache_get_cookie(pf, dst);
+#endif
+ }
+}
+
+static void nf_conn_rtcache_dst_obsolete(struct nf_conn_rtcache *rtc,
+ enum ip_conntrack_dir dir)
+{
+ struct dst_entry *old;
+
+ pr_debug("Invalidate iif %d for dir %d on cache %p\n",
+ rtc->cached_dst[dir].iif, dir, rtc);
+
+ old = xchg(&rtc->cached_dst[dir].dst, NULL);
+ dst_release(old);
+ rtc->cached_dst[dir].iif = -1;
+}
+
+static unsigned int nf_rtcache_in(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn_rtcache *rtc;
+ enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
+ struct dst_entry *dst;
+ struct nf_conn *ct;
+ int iif;
+ u32 cookie;
+
+ if (skb_dst(skb) || skb->sk)
+ return NF_ACCEPT;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return NF_ACCEPT;
+
+ rtc = nf_ct_rtcache_find_usable(ct);
+ if (!rtc)
+ return NF_ACCEPT;
+
+ /* if iif changes, don't use cache and let ip stack
+ * do route lookup.
+ *
+ * If rp_filter is enabled it might toss skb, so
+ * we don't want to avoid these checks.
+ */
+ dir = CTINFO2DIR(ctinfo);
+ iif = nf_conn_rtcache_iif_get(rtc, dir);
+ if (state->in->ifindex != iif) {
+ pr_debug("ct %p, iif %d, cached iif %d, skip cached entry\n",
+ ct, iif, state->in->ifindex);
+ return NF_ACCEPT;
+ }
+ dst = nf_conn_rtcache_dst_get(rtc, dir);
+ if (dst == NULL)
+ return NF_ACCEPT;
+
+ cookie = nf_rtcache_get_cookie(ops->pf, dst);
+
+ dst = dst_check(dst, cookie);
+ pr_debug("obtained dst %p for skb %p, cookie %d\n", dst, skb, cookie);
+ if (likely(dst))
+ skb_dst_set_noref(skb, dst);
+ else
+ nf_conn_rtcache_dst_obsolete(rtc, dir);
+
+ return NF_ACCEPT;
+}
+
+static unsigned int nf_rtcache_forward(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn_rtcache *rtc;
+ enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
+ struct nf_conn *ct;
+ struct dst_entry *dst = skb_dst(skb);
+ int iif;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return NF_ACCEPT;
+
+ if (dst && dst_xfrm(dst))
+ return NF_ACCEPT;
+
+ if (!nf_ct_is_confirmed(ct)) {
+ if (WARN_ON(nf_ct_rtcache_find(ct)))
+ return NF_ACCEPT;
+ nf_ct_rtcache_ext_add(ct);
+ return NF_ACCEPT;
+ }
+
+ rtc = nf_ct_rtcache_find_usable(ct);
+ if (!rtc)
+ return NF_ACCEPT;
+
+ dir = CTINFO2DIR(ctinfo);
+ iif = nf_conn_rtcache_iif_get(rtc, dir);
+ pr_debug("ct %p, skb %p, dir %d, iif %d, cached iif %d\n",
+ ct, skb, dir, iif, state->in->ifindex);
+ if (likely(state->in->ifindex == iif))
+ return NF_ACCEPT;
+
+ nf_conn_rtcache_dst_set(ops->pf, rtc, skb_dst(skb), dir, state->in->ifindex);
+ return NF_ACCEPT;
+}
+
+static int nf_rtcache_dst_remove(struct nf_conn *ct, void *data)
+{
+ struct nf_conn_rtcache *rtc = nf_ct_rtcache_find(ct);
+ struct net_device *dev = data;
+
+ if (!rtc)
+ return 0;
+
+ if (dev->ifindex == rtc->cached_dst[IP_CT_DIR_ORIGINAL].iif ||
+ dev->ifindex == rtc->cached_dst[IP_CT_DIR_REPLY].iif) {
+ nf_conn_rtcache_dst_obsolete(rtc, IP_CT_DIR_ORIGINAL);
+ nf_conn_rtcache_dst_obsolete(rtc, IP_CT_DIR_REPLY);
+ }
+
+ return 0;
+}
+
+static int nf_rtcache_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+
+ if (event == NETDEV_DOWN)
+ nf_ct_iterate_cleanup(net, nf_rtcache_dst_remove, dev, 0, 0);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nf_rtcache_notifier = {
+ .notifier_call = nf_rtcache_netdev_event,
+};
+
+static struct nf_hook_ops rtcache_ops[] = {
+ {
+ .hook = nf_rtcache_in,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP_PRI_LAST,
+ },
+ {
+ .hook = nf_rtcache_forward,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_FORWARD,
+ .priority = NF_IP_PRI_LAST,
+ },
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+ {
+ .hook = nf_rtcache_in,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP_PRI_LAST,
+ },
+ {
+ .hook = nf_rtcache_forward,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_FORWARD,
+ .priority = NF_IP_PRI_LAST,
+ },
+#endif
+};
+
+static struct nf_ct_ext_type rtcache_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_rtcache),
+ .align = __alignof__(struct nf_conn_rtcache),
+ .id = NF_CT_EXT_RTCACHE,
+ .destroy = nf_conn_rtcache_destroy,
+};
+
+static int __init nf_conntrack_rtcache_init(void)
+{
+ int ret = nf_ct_extend_register(&rtcache_extend);
+
+ if (ret < 0) {
+ pr_err("nf_conntrack_rtcache: Unable to register extension\n");
+ return ret;
+ }
+
+ ret = nf_register_hooks(rtcache_ops, ARRAY_SIZE(rtcache_ops));
+ if (ret < 0) {
+ nf_ct_extend_unregister(&rtcache_extend);
+ return ret;
+ }
+
+ ret = register_netdevice_notifier(&nf_rtcache_notifier);
+ if (ret) {
+ nf_unregister_hooks(rtcache_ops, ARRAY_SIZE(rtcache_ops));
+ nf_ct_extend_unregister(&rtcache_extend);
+ }
+
+ return ret;
+}
+
+static int nf_rtcache_ext_remove(struct nf_conn *ct, void *data)
+{
+ struct nf_conn_rtcache *rtc = nf_ct_rtcache_find(ct);
+
+ return rtc != NULL;
+}
+
+static bool __exit nf_conntrack_rtcache_wait_for_dying(struct net *net)
+{
+ bool wait = false;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ struct nf_conn *ct;
+ struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+ rcu_read_lock();
+ spin_lock_bh(&pcpu->lock);
+
+ hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (nf_ct_rtcache_find(ct) != NULL) {
+ wait = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&pcpu->lock);
+ rcu_read_unlock();
+ }
+
+ return wait;
+}
+
+static void __exit nf_conntrack_rtcache_fini(void)
+{
+ struct net *net;
+ int count = 0;
+
+ /* remove hooks so no new connections get rtcache extension */
+ nf_unregister_hooks(rtcache_ops, ARRAY_SIZE(rtcache_ops));
+
+ synchronize_net();
+
+ unregister_netdevice_notifier(&nf_rtcache_notifier);
+
+ rtnl_lock();
+
+ /* zap all conntracks with rtcache extension */
+ for_each_net(net)
+ nf_ct_iterate_cleanup(net, nf_rtcache_ext_remove, NULL, 0, 0);
+
+ for_each_net(net) {
+ /* .. and make sure they're gone from dying list, too */
+ while (nf_conntrack_rtcache_wait_for_dying(net)) {
+ msleep(200);
+ WARN_ONCE(++count > 25, "Waiting for all rtcache conntracks to go away\n");
+ }
+ }
+
+ rtnl_unlock();
+ synchronize_net();
+ nf_ct_extend_unregister(&rtcache_extend);
+}
+module_init(nf_conntrack_rtcache_init);
+module_exit(nf_conntrack_rtcache_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("Conntrack route cache extension");

View file

@ -0,0 +1,22 @@
--- a/arch/mips/boot/compressed/string.c
+++ b/arch/mips/boot/compressed/string.c
@@ -26,3 +26,19 @@ void *memset(void *s, int c, size_t n)
ss[i] = c;
return s;
}
+
+void *memmove(void *__dest, __const void *__src, size_t count)
+{
+ unsigned char *d = __dest;
+ const unsigned char *s = __src;
+
+ if (__dest == __src)
+ return __dest;
+
+ if (__dest < __src)
+ return memcpy(__dest, __src, count);
+
+ while (count--)
+ d[count] = s[count];
+ return __dest;
+}

View file

@ -0,0 +1,24 @@
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 13 Apr 2015 15:54:04 +0200
Subject: [PATCH] bgmac: fix MAC soft-reset bit for corerev > 4
Only core revisions older than 4 use BGMAC_CMDCFG_SR_REV0
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000

View file

@ -0,0 +1,28 @@
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 13 Apr 2015 15:56:26 +0200
Subject: [PATCH] bgmac: reset all 4 GMAC cores on init
On a BCM4709 based device, I found that GMAC cores may be enabled at
probe time, but only become usable after a full reset.
Disable cores before re-enabling them to ensure that they are properly
reset.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1641,8 +1641,11 @@ static int bgmac_probe(struct bcma_devic
ns_core = bcma_find_core_unit(core->bus,
BCMA_CORE_MAC_GBIT,
ns_gmac);
- if (ns_core && !bcma_core_is_enabled(ns_core))
- bcma_core_enable(ns_core, 0);
+ if (!ns_core)
+ continue;
+
+ bcma_core_disable(ns_core, 0);
+ bcma_core_enable(ns_core, 0);
}
}

View file

@ -0,0 +1,16 @@
Fix a crash caused by actions performed on the underlying interface (MAC
address, MTU or link state update). These actions trigger pppoe_flush_dev(),
which cleans up the device without announcing it in sk->sk_state.
Patch by Guillaume Nault (pulled from netdev@vger)
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_d
if (po->pppoe_dev == dev &&
sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
pppox_unbind_sock(sk);
- sk->sk_state = PPPOX_ZOMBIE;
sk->sk_state_change(sk);
po->pppoe_dev = NULL;
dev_put(dev);

View file

@ -0,0 +1,82 @@
From 1e311820ec3055e3f08e687de6564692a7cec675 Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Mon, 28 Jan 2013 20:06:29 +0100
Subject: [PATCH 11/12] USB: EHCI: add ignore_oc flag to disable overcurrent
checking
This patch adds an ignore_oc flag which can be set by EHCI controllers that
do not support overcurrent checking or that want it disabled. The EHCI
platform data in include/linux/usb/ehci_pdriver.h is also augmented to
take advantage of this new flag.
Signed-off-by: Florian Fainelli <florian@openwrt.org>
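As a hedged sketch (not part of this patch) of how a board file might consume the new flag -- the device and variable names here are invented; only the ignore_oc field and the "ehci-platform" driver name are real:

/* hypothetical board code: EHCI platform device with overcurrent
 * checking disabled through the new pdata flag */
#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>

static struct usb_ehci_pdata example_ehci_pdata = {
	.ignore_oc = 1,		/* field added by this patch */
};

static struct platform_device example_ehci_device = {
	.name = "ehci-platform",
	.id = -1,
	.dev = {
		.platform_data = &example_ehci_pdata,
	},
};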
---
drivers/usb/host/ehci-hcd.c | 2 +-
drivers/usb/host/ehci-hub.c | 4 ++--
drivers/usb/host/ehci-platform.c | 1 +
drivers/usb/host/ehci.h | 1 +
include/linux/usb/ehci_pdriver.h | 1 +
5 files changed, 6 insertions(+), 3 deletions(-)
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -639,7 +639,7 @@ static int ehci_run (struct usb_hcd *hcd
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
temp >> 8, temp & 0xff,
- ignore_oc ? ", overcurrent ignored" : "");
+ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
ehci_writel(ehci, INTR_MASK,
&ehci->regs->intr_enable); /* Turn On Interrupts */
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -634,7 +634,7 @@ ehci_hub_status_data (struct usb_hcd *hc
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
- if (!ignore_oc)
+ if (!ignore_oc && !ehci->ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
@@ -996,7 +996,7 @@ int ehci_hub_control(
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
- if ((temp & PORT_OCC) && !ignore_oc){
+ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -251,6 +251,8 @@ static int ehci_platform_probe(struct pl
hcd->has_tt = 1;
if (pdata->reset_on_resume)
priv->reset_on_resume = true;
+ if (pdata->ignore_oc)
+ ehci->ignore_oc = 1;
#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
if (ehci->big_endian_mmio) {
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -227,6 +227,7 @@ struct ehci_hcd { /* one per controlle
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
unsigned need_oc_pp_cycle:1; /* MPC834X port power */
unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
+ unsigned ignore_oc:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -49,6 +49,7 @@ struct usb_ehci_pdata {
unsigned no_io_watchdog:1;
unsigned reset_on_resume:1;
unsigned dma_mask_64:1;
+ unsigned ignore_oc:1;
/* Turn on all power and clocks */
int (*power_on)(struct platform_device *pdev);

View file

@ -0,0 +1,86 @@
From: Felix Fietkau <nbd@openwrt.org>
Date: Fri, 10 Apr 2015 13:35:29 +0200
Subject: [PATCH] jffs2: use .rename2 and add RENAME_WHITEOUT support
It is required for renames on overlayfs
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
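For context (not part of this patch): RENAME_WHITEOUT is normally issued by overlayfs internally, but the flag can also be exercised directly through renameat2(). A minimal user-space sketch, assuming kernel headers that define SYS_renameat2:

/* rename "old" to "new" and leave a whiteout entry at "old" */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT (1 << 2)	/* from include/uapi/linux/fs.h */
#endif

int main(void)
{
	if (syscall(SYS_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
		    RENAME_WHITEOUT) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}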
---
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -35,7 +35,7 @@ static int jffs2_mkdir (struct inode *,s
static int jffs2_rmdir (struct inode *,struct dentry *);
static int jffs2_mknod (struct inode *,struct dentry *,umode_t,dev_t);
static int jffs2_rename (struct inode *, struct dentry *,
- struct inode *, struct dentry *);
+ struct inode *, struct dentry *, unsigned int);
const struct file_operations jffs2_dir_operations =
{
@@ -57,7 +57,7 @@ const struct inode_operations jffs2_dir_
.mkdir = jffs2_mkdir,
.rmdir = jffs2_rmdir,
.mknod = jffs2_mknod,
- .rename = jffs2_rename,
+ .rename2 = jffs2_rename,
.get_acl = jffs2_get_acl,
.set_acl = jffs2_set_acl,
.setattr = jffs2_setattr,
@@ -757,8 +757,27 @@ static int jffs2_mknod (struct inode *di
return ret;
}
+static int jffs2_whiteout(struct inode *old_dir, struct dentry *old_dentry)
+{
+ struct dentry *wh;
+ int err;
+
+ wh = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
+ if (!wh)
+ return -ENOMEM;
+
+ err = jffs2_mknod(old_dir, wh, S_IFCHR | WHITEOUT_MODE,
+ WHITEOUT_DEV);
+ if (err)
+ return err;
+
+ d_rehash(wh);
+ return 0;
+}
+
static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
- struct inode *new_dir_i, struct dentry *new_dentry)
+ struct inode *new_dir_i, struct dentry *new_dentry,
+ unsigned int flags)
{
int ret;
struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
@@ -766,6 +785,9 @@ static int jffs2_rename (struct inode *o
uint8_t type;
uint32_t now;
+ if (flags & ~RENAME_WHITEOUT)
+ return -EINVAL;
+
/* The VFS will check for us and prevent trying to rename a
* file over a directory and vice versa, but if it's a directory,
* the VFS can't check whether the victim is empty. The filesystem
@@ -829,9 +851,14 @@ static int jffs2_rename (struct inode *o
if (d_is_dir(old_dentry) && !victim_f)
inc_nlink(new_dir_i);
- /* Unlink the original */
- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
- old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
+ if (flags & RENAME_WHITEOUT)
+ /* Replace with whiteout */
+ ret = jffs2_whiteout(old_dir_i, old_dentry);
+ else
+ /* Unlink the original */
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+ old_dentry->d_name.name,
+ old_dentry->d_name.len, NULL, now);
/* We don't touch inode->i_nlink */

View file

@ -0,0 +1,58 @@
From: Felix Fietkau <nbd@openwrt.org>
Date: Sat, 25 Apr 2015 12:41:32 +0200
Subject: [PATCH] jffs2: add RENAME_EXCHANGE support
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -785,7 +785,7 @@ static int jffs2_rename (struct inode *o
uint8_t type;
uint32_t now;
- if (flags & ~RENAME_WHITEOUT)
+ if (flags & ~(RENAME_WHITEOUT | RENAME_EXCHANGE))
return -EINVAL;
/* The VFS will check for us and prevent trying to rename a
@@ -793,7 +793,7 @@ static int jffs2_rename (struct inode *o
* the VFS can't check whether the victim is empty. The filesystem
* needs to do that for itself.
*/
- if (d_really_is_positive(new_dentry)) {
+ if (d_really_is_positive(new_dentry) && !(flags & RENAME_EXCHANGE)) {
victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
if (d_is_dir(new_dentry)) {
struct jffs2_full_dirent *fd;
@@ -828,7 +828,7 @@ static int jffs2_rename (struct inode *o
if (ret)
return ret;
- if (victim_f) {
+ if (victim_f && !(flags & RENAME_EXCHANGE)) {
/* There was a victim. Kill it off nicely */
if (d_is_dir(new_dentry))
clear_nlink(d_inode(new_dentry));
@@ -854,6 +854,12 @@ static int jffs2_rename (struct inode *o
if (flags & RENAME_WHITEOUT)
/* Replace with whiteout */
ret = jffs2_whiteout(old_dir_i, old_dentry);
+ else if (flags & RENAME_EXCHANGE)
+ /* Replace the original */
+ ret = jffs2_do_link(c, JFFS2_INODE_INFO(old_dir_i),
+ d_inode(new_dentry)->i_ino, type,
+ old_dentry->d_name.name, old_dentry->d_name.len,
+ now);
else
/* Unlink the original */
ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
@@ -880,7 +886,7 @@ static int jffs2_rename (struct inode *o
return ret;
}
- if (d_is_dir(old_dentry))
+ if (d_is_dir(old_dentry) && !(flags & RENAME_EXCHANGE))
drop_nlink(old_dir_i);
new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);

View file

@ -0,0 +1,54 @@
From: Stephen Hemminger <stephen@networkplumber.org>
Subject: bridge: allow reception on disabled port
When an ethernet device is enslaved to a bridge, and the bridge STP
detects loss of carrier (or operational state down), then normally
packet reception is blocked.
This breaks control applications like WPA which may be expecting to
receive packets to negotiate to bring link up. The bridge needs to
block forwarding packets from these disabled ports, but there is no
hard requirement to not allow local packet delivery.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -211,11 +211,13 @@ EXPORT_SYMBOL_GPL(br_handle_frame_finish
static int br_handle_local_finish(struct sock *sk, struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
- u16 vid = 0;
+ if (p->state != BR_STATE_DISABLED) {
+ u16 vid = 0;
- /* check if vlan is allowed, to avoid spoofing */
- if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+ /* check if vlan is allowed, to avoid spoofing */
+ if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+ }
return 0; /* process further */
}
@@ -289,6 +291,18 @@ rx_handler_result_t br_handle_frame(stru
forward:
switch (p->state) {
+ case BR_STATE_DISABLED:
+ if (ether_addr_equal(p->br->dev->dev_addr, dest))
+ skb->pkt_type = PACKET_HOST;
+
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb, skb->dev, NULL,
+ br_handle_local_finish))
+ break;
+
+ BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
+ br_pass_frame_up(skb);
+ break;
+
case BR_STATE_FORWARDING:
rhook = rcu_dereference(br_should_route_hook);
if (rhook) {

View file

@ -0,0 +1,778 @@
From 2c58080407554e1bac8fd50d23cb02420524caed Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 12 Aug 2013 12:50:22 +0200
Subject: [PATCH] MIPS: partially inline dma ops
Several DMA ops are no-ops on many platforms, and the indirection through
the mips_dma_map_ops function table causes the compiler to emit
unnecessary code.
Inlining visibly improves network performance in my tests (on a 24Kc
based system), and also slightly reduces code size of a few drivers.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
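A rough sketch (not from the patch) of the shape dma_map_single() collapses to on a platform without CONFIG_SYS_HAS_DMA_OPS, once the compiler folds away the NULL-ops branch; the plat_* helpers and __dma_sync() are the same ones used in the patch below, while the function name is invented:

static inline dma_addr_t example_fast_map(struct device *dev, void *ptr,
					   size_t size,
					   enum dma_data_direction dir)
{
	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
	struct page *page = virt_to_page(ptr);

	/* no indirect call through mips_dma_map_ops on this path */
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, dir);

	return plat_map_dma_mem_page(dev, page) + offset;
}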
---
arch/mips/Kconfig | 4 +
arch/mips/include/asm/dma-mapping.h | 360 +++++++++++++++++++++++++++++++++++-
arch/mips/mm/dma-default.c | 163 ++--------------
3 files changed, 373 insertions(+), 154 deletions(-)
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1586,6 +1586,7 @@ config CPU_CAVIUM_OCTEON
select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select MIPS_L1_CACHE_SHIFT_7
+ select SYS_HAS_DMA_OPS
help
The Cavium Octeon processor is a highly integrated chip containing
many ethernet hardware widgets for networking tasks. The processor
@@ -1881,6 +1882,9 @@ config MIPS_MALTA_PM
bool
default y
+config SYS_HAS_DMA_OPS
+ bool
+
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -1,9 +1,16 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
#include <asm/dma-coherence.h>
#include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm-generic/dma-coherent.h>
#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
#include <dma-coherence.h>
@@ -11,12 +18,53 @@
extern struct dma_map_ops *mips_dma_map_ops;
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs);
+
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
+#ifdef CONFIG_SYS_HAS_DMA_OPS
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
else
return mips_dma_map_ops;
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
+ * speculatively fill random cachelines with stale data at any time,
+ * requiring an extra flush post-DMA.
+ *
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this
+ * condition. However this function is only called on non-I/O-coherent
+ * systems and only the R10000 and R12000 are used in such systems, the
+ * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
+ */
+static inline int cpu_needs_post_dma_flush(struct device *dev)
+{
+ return !plat_device_is_coherent(dev) &&
+ (boot_cpu_type() == CPU_R10000 ||
+ boot_cpu_type() == CPU_R12000 ||
+ boot_cpu_type() == CPU_BMIPS5000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return pfn_to_page(
+ plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -29,9 +77,399 @@ static inline bool dma_capable(struct de
static inline void dma_mark_clean(void *addr, size_t size) {}
-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+ struct page *page = virt_to_page(ptr);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, attrs);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+/*
+ * dma_maps_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ } else {
+ for_each_sg(sg, s, nents, i) {
+ struct page *page = sg_page(s);
+
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, s->offset, s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address =
+ plat_map_dma_mem_page(dev, page) + s->offset;
+ }
+ ents = nents;
+ }
+ BUG_ON(ents < 0);
+ debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+ return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+ if (ops) {
+ ops->unmap_sg(dev, sg, nents, dir, attrs);
+ return;
+ }
+ for_each_sg(sg, s, nents, i) {
+ if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+ }
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, NULL);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_single_for_cpu(dev, addr, size, dir);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ }
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ }
+
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr + offset, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ } else if (cpu_needs_post_dma_flush(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ plat_post_dma_flush(dev);
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_sg_for_device(dev, sg, nelems, dir);
+ } else if (!plat_device_is_coherent(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops)
+ return ops->dma_supported(dev, mask);
+ return plat_dma_supported(dev, mask);
+}
+
+static inline int dma_mapping_error(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_mapping_error(dev, mask);
+ if (ops)
+ return ops->mapping_error(dev, mask);
+ return 0;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if(!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ if (ops && ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ void *ret;
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops)
+ ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+ else
+ ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+ attrs);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+
+ return ret;
+}
+
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops)
+ ops->free(dev, size, vaddr, dma_handle, attrs);
+ else
+ mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
+
+ debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+
#endif /* _ASM_DMA_MAPPING_H */
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -46,35 +46,6 @@ static int __init setnocoherentio(char *
early_param("nocoherentio", setnocoherentio);
#endif
-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- *
- * Note that the R14000 and R16000 should also be checked for in this
- * condition. However this function is only called on non-I/O-coherent
- * systems and only the R10000 and R12000 are used in such systems, the
- * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
- */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
-{
- return !plat_device_is_coherent(dev) &&
- (boot_cpu_type() == CPU_R10000 ||
- boot_cpu_type() == CPU_R12000 ||
- boot_cpu_type() == CPU_BMIPS5000);
-}
-
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
gfp_t dma_flag;
@@ -129,7 +100,7 @@ static void *mips_dma_alloc_noncoherent(
return ret;
}
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -165,6 +136,7 @@ static void *mips_dma_alloc_coherent(str
return ret;
}
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
static void mips_dma_free_noncoherent(struct device *dev, size_t size,
@@ -174,7 +146,7 @@ static void mips_dma_free_noncoherent(st
free_pages((unsigned long) vaddr, get_order(size));
}
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr;
@@ -196,40 +168,7 @@ static void mips_dma_free_coherent(struc
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
}
-
-static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
-{
- unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long addr = (unsigned long)cpu_addr;
- unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
- int ret = -ENXIO;
-
- if (!plat_device_is_coherent(dev) && !hw_coherentio)
- addr = CAC_ADDR(addr);
-
- pfn = page_to_pfn(virt_to_page((void *)addr));
-
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
-}
+EXPORT_SYMBOL(mips_dma_free_coherent);
static inline void __dma_sync_virtual(void *addr, size_t size,
enum dma_data_direction direction)
@@ -258,7 +197,7 @@ static inline void __dma_sync_virtual(vo
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
-static inline void __dma_sync(struct page *page,
+void __dma_sync(struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction)
{
size_t left = size;
@@ -288,120 +227,7 @@ static inline void __dma_sync(struct pag
left -= len;
} while (left);
}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- if (!plat_device_is_coherent(dev))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nhwentries, i) {
- if (!plat_device_is_coherent(dev) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (cpu_needs_post_dma_flush(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (!plat_device_is_coherent(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
@@ -414,24 +240,10 @@ void dma_cache_sync(struct device *dev,
EXPORT_SYMBOL(dma_cache_sync);
-static struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .mmap = mips_dma_mmap,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .mapping_error = mips_dma_mapping_error,
- .dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
EXPORT_SYMBOL(mips_dma_map_ops);
+#endif
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

View file

@ -0,0 +1,183 @@
From 3e7056c3a369e9ef9ca804bc626b60ef6b62ee27 Mon Sep 17 00:00:00 2001
From: Hauke Mehrtens <hauke@hauke-m.de>
Date: Sun, 17 May 2015 18:48:38 +0200
Subject: [PATCH 2/3] mtd: part: add generic parsing of linux,part-probe
This moves the linux,part-probe device tree parsing code from
physmap_of.c to mtdpart.c. Now all drivers can use this feature by just
providing a reference to their device tree node in struct
mtd_part_parser_data.
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
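As a hedged illustration (not part of this patch) of what "providing a reference to their device tree node" means on the driver side -- the probe function and the example_setup_mtd() helper are invented, while mtd_part_parser_data.of_node and mtd_device_parse_register() are the existing API:

/* hypothetical flash driver probe snippet: with this patch, handing over
 * the DT node is enough for linux,part-probe to be honoured */
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>

static int example_flash_probe(struct platform_device *pdev)
{
	struct mtd_part_parser_data ppdata = {};
	struct mtd_info *mtd = example_setup_mtd(pdev);	/* made-up helper */

	ppdata.of_node = pdev->dev.of_node;
	/* NULL parser list: mtdpart.c falls back to its defaults and now
	 * also honours the linux,part-probe property itself */
	return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
}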
---
Documentation/devicetree/bindings/mtd/nand.txt | 16 +++++++++
drivers/mtd/maps/physmap_of.c | 46 +-------------------------
drivers/mtd/mtdpart.c | 45 +++++++++++++++++++++++++
3 files changed, 62 insertions(+), 45 deletions(-)
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -12,6 +12,22 @@
- nand-ecc-step-size: integer representing the number of data bytes
that are covered by a single ECC step.
+- linux,part-probe: list of name as strings of the partition parser
+ which should be used to parse the partition table.
+ They will be tried in the specified ordering and
+ the next one will be used if the previous one
+ failed.
+
+ Example: linux,part-probe = "cmdlinepart", "ofpart";
+
+ This is also the default value, which will be used
+ if this attribute is not specified. It could be
+ that the flash driver in use overwrote the default
+ value and uses some other default.
+
+ Possible values are: bcm47xxpart, afs, ar7part,
+ ofoldpart, ofpart, bcm63xxpart, RedBoot, cmdlinepart
+
The ECC strength and ECC step size properties define the correction capability
of a controller. Together, they say a controller can correct "{strength} bit
errors per {size} bytes".
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -112,47 +112,9 @@ static struct mtd_info *obsolete_probe(s
static const char * const part_probe_types_def[] = {
"cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
-static const char * const *of_get_probes(struct device_node *dp)
-{
- const char *cp;
- int cplen;
- unsigned int l;
- unsigned int count;
- const char **res;
-
- cp = of_get_property(dp, "linux,part-probe", &cplen);
- if (cp == NULL)
- return part_probe_types_def;
-
- count = 0;
- for (l = 0; l != cplen; l++)
- if (cp[l] == 0)
- count++;
-
- res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
- if (!res)
- return NULL;
- count = 0;
- while (cplen > 0) {
- res[count] = cp;
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
- count++;
- }
- return res;
-}
-
-static void of_free_probes(const char * const *probes)
-{
- if (probes != part_probe_types_def)
- kfree(probes);
-}
-
static const struct of_device_id of_flash_match[];
static int of_flash_probe(struct platform_device *dev)
{
- const char * const *part_probe_types;
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
@@ -312,14 +274,8 @@ static int of_flash_probe(struct platfor
goto err_out;
ppdata.of_node = dp;
- part_probe_types = of_get_probes(dp);
- if (!part_probe_types) {
- err = -ENOMEM;
- goto err_out;
- }
- mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
+ mtd_device_parse_register(info->cmtd, part_probe_types_def, &ppdata,
NULL, 0);
- of_free_probes(part_probe_types);
kfree(mtd_list);
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,6 +29,7 @@
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#include <linux/err.h>
#include <linux/kconfig.h>
@@ -719,6 +720,42 @@ void deregister_mtd_parser(struct mtd_pa
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
/*
+ * Parses the linux,part-probe device tree property.
+ * When a non null value is returned it has to be freed with kfree() by
+ * the caller.
+ */
+static const char * const *of_get_probes(struct device_node *dp)
+{
+ const char *cp;
+ int cplen;
+ unsigned int l;
+ unsigned int count;
+ const char **res;
+
+ cp = of_get_property(dp, "linux,part-probe", &cplen);
+ if (cp == NULL)
+ return NULL;
+
+ count = 0;
+ for (l = 0; l != cplen; l++)
+ if (cp[l] == 0)
+ count++;
+
+ res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return NULL;
+ count = 0;
+ while (cplen > 0) {
+ res[count] = cp;
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ count++;
+ }
+ return res;
+}
+
+/*
* Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
* are changing this array!
*/
@@ -754,6 +791,13 @@ int parse_mtd_partitions(struct mtd_info
{
struct mtd_part_parser *parser;
int ret = 0;
+ const char *const *types_of = NULL;
+
+ if (data && data->of_node) {
+ types_of = of_get_probes(data->of_node);
+ if (types_of != NULL)
+ types = types_of;
+ }
if (!types)
types = default_mtd_part_types;
@@ -772,6 +816,7 @@ int parse_mtd_partitions(struct mtd_info
break;
}
}
+ kfree(types_of);
return ret;
}

View file

@ -0,0 +1,35 @@
From a95f03e51471dbdbafd3391991d867ac2358ed02 Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jogo@openwrt.org>
Date: Sun, 23 Aug 2015 14:23:29 +0200
Subject: [PATCH] usb: ehci-orion: fix probe for !GENERIC_PHY
Commit d445913ce0ab7f ("usb: ehci-orion: add optional PHY support")
added support for optional phys, but devm_phy_optional_get returns
-ENOSYS if GENERIC_PHY is not enabled.
This causes probe failures, even when there are no phys specified:
[ 1.443365] orion-ehci f1058000.usb: init f1058000.usb fail, -38
[ 1.449403] orion-ehci: probe of f1058000.usb failed with error -38
Similar to dwc3, treat -ENOSYS as no phy.
Fixes: d445913ce0ab7f ("usb: ehci-orion: add optional PHY support")
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
drivers/usb/host/ehci-orion.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -224,7 +224,8 @@ static int ehci_orion_drv_probe(struct p
priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(priv->phy)) {
err = PTR_ERR(priv->phy);
- goto err_phy_get;
+ if (err != -ENOSYS)
+ goto err_phy_get;
} else {
err = phy_init(priv->phy);
if (err)

View file

@ -0,0 +1,11 @@
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -165,7 +165,7 @@ else
# annotated or signed tagged state (as git describe only
# looks at signed or annotated tags - git tag -a/-s) and
# LOCALVERSION= is not specified
- if test "${LOCALVERSION+set}" != "set"; then
+ if test "${CONFIG_LOCALVERSION+set}" != "set"; then
scm=$(scm_version --short)
res="$res${scm:++}"
fi

View file

@ -0,0 +1,14 @@
--- a/Makefile
+++ b/Makefile
@@ -607,9 +607,9 @@ include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os $(EXTRA_OPTIMIZATION) $(call cc-disable-warning,maybe-uninitialized,)
else
-KBUILD_CFLAGS += -O2
+KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
endif
# Tell gcc to never replace conditional load with a non-conditional one

View file

@@ -0,0 +1,11 @@
--- a/Makefile
+++ b/Makefile
@@ -398,7 +398,7 @@ KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
-KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)

View file

@@ -0,0 +1,108 @@
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -58,6 +58,7 @@ static struct addr_range percpu_range =
static struct sym_entry *table;
static unsigned int table_size, table_cnt;
static int all_symbols = 0;
+static int uncompressed = 0;
static int absolute_percpu = 0;
static char symbol_prefix_char = '\0';
static unsigned long long kernel_start_addr = 0;
@@ -403,6 +404,9 @@ static void write_src(void)
free(markers);
+ if (uncompressed)
+ return;
+
output_label("kallsyms_token_table");
off = 0;
for (i = 0; i < 256; i++) {
@@ -461,6 +465,9 @@ static void *find_token(unsigned char *s
{
int i;
+ if (uncompressed)
+ return NULL;
+
for (i = 0; i < len - 1; i++) {
if (str[i] == token[0] && str[i+1] == token[1])
return &str[i];
@@ -533,6 +540,9 @@ static void optimize_result(void)
{
int i, best;
+ if (uncompressed)
+ return;
+
/* using the '\0' symbol last allows compress_symbols to use standard
* fast string functions */
for (i = 255; i >= 0; i--) {
@@ -703,7 +713,9 @@ int main(int argc, char **argv)
} else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
const char *p = &argv[i][14];
kernel_start_addr = strtoull(p, NULL, 16);
- } else
+ } else if (strcmp(argv[i], "--uncompressed") == 0)
+ uncompressed = 1;
+ else
usage();
}
} else if (argc != 1)
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1345,6 +1345,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
the unaligned access emulation.
see arch/parisc/kernel/unaligned.c for reference
+config KALLSYMS_UNCOMPRESSED
+ bool "Keep kallsyms uncompressed"
+ depends on KALLSYMS
+ help
+ Normally kallsyms contains compressed symbols (using a token table),
+ reducing the uncompressed kernel image size. Keeping the symbol table
+ uncompressed instead lets this part compress significantly better, which
+ shrinks compressed kernel images.
+
+ Say N unless you need compressed kernel images to be small.
+
config HAVE_PCSPKR_PLATFORM
bool
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -90,6 +90,10 @@ kallsyms()
kallsymopt="${kallsymopt} --absolute-percpu"
fi
+ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then
+ kallsymopt="${kallsymopt} --uncompressed"
+ fi
+
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -109,6 +109,11 @@ static unsigned int kallsyms_expand_symb
* For every byte on the compressed symbol data, copy the table
* entry for that byte.
*/
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+ memcpy(result, data + 1, len - 1);
+ result += len - 1;
+ len = 0;
+#endif
while (len) {
tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
data++;
@@ -141,6 +146,9 @@ tail:
*/
static char kallsyms_get_symbol_type(unsigned int off)
{
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+ return kallsyms_names[off + 1];
+#endif
/*
* Get just the first code, look it up in the token table,
* and return the first char from this token.

View file

@@ -0,0 +1,194 @@
From: Felix Fietkau <nbd@openwrt.org>
Subject: [PATCH] build: add a hack for removing non-essential module info
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -169,9 +169,10 @@ void trim_init_extable(struct module *m)
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+#define MODULE_INFO_STRIP(tag, info) __MODULE_INFO_STRIP(tag, tag, info)
/* For userspace: you can also call me... */
-#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
+#define MODULE_ALIAS(_alias) MODULE_INFO_STRIP(alias, _alias)
/* Soft module dependencies. See man modprobe.d for details.
* Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz")
@@ -212,12 +213,12 @@ void trim_init_extable(struct module *m)
* Author(s), use "Name <email>" or just "Name", for multiple
* authors use multiple MODULE_AUTHOR() statements/lines.
*/
-#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
+#define MODULE_AUTHOR(_author) MODULE_INFO_STRIP(author, _author)
/* What your module does. */
-#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
+#define MODULE_DESCRIPTION(_description) MODULE_INFO_STRIP(description, _description)
-#ifdef MODULE
+#if defined(MODULE) && !defined(CONFIG_MODULE_STRIPPED)
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
extern const typeof(name) __mod_##type##__##name##_device_table \
@@ -244,7 +245,9 @@ extern const typeof(name) __mod_##type##
*/
#if defined(MODULE) || !defined(CONFIG_SYSFS)
-#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#define MODULE_VERSION(_version) MODULE_INFO_STRIP(version, _version)
+#elif defined(CONFIG_MODULE_STRIPPED)
+#define MODULE_VERSION(_version) __MODULE_INFO_DISABLED(version)
#else
#define MODULE_VERSION(_version) \
static struct module_version_attribute ___modver_attr = { \
@@ -266,7 +269,7 @@ extern const typeof(name) __mod_##type##
/* Optional firmware file (or files) needed by the module
* format is simply firmware file name. Multiple firmware
* files require multiple MODULE_FIRMWARE() specifiers */
-#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
+#define MODULE_FIRMWARE(_firmware) MODULE_INFO_STRIP(firmware, _firmware)
/* Given an address, look for it in the exception tables */
const struct exception_table_entry *search_exception_tables(unsigned long add);
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,6 +16,16 @@
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO_DISABLED(name) \
+ struct __UNIQUE_ID(name) {}
+
+#ifdef CONFIG_MODULE_STRIPPED
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO_DISABLED(name)
+#else
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO(tag, name, info)
+#endif
+
#ifdef MODULE
#define __MODULE_INFO(tag, name, info) \
static const char __UNIQUE_ID(name)[] \
@@ -23,8 +33,7 @@ static const char __UNIQUE_ID(name)[]
= __stringify(tag) "=" info
#else /* !MODULE */
/* This struct is here for syntactic coherency, it is not used */
-#define __MODULE_INFO(tag, name, info) \
- struct __UNIQUE_ID(name) {}
+#define __MODULE_INFO(tag, name, info) __MODULE_INFO_DISABLED(name)
#endif
#define __MODULE_PARM_TYPE(name, _type) \
__MODULE_INFO(parmtype, name##type, #name ":" _type)
@@ -32,7 +41,7 @@ static const char __UNIQUE_ID(name)[]
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
#define MODULE_PARM_DESC(_parm, desc) \
- __MODULE_INFO(parm, _parm, #_parm ":" desc)
+ __MODULE_INFO_STRIP(parm, _parm, #_parm ":" desc)
struct kernel_param;
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2026,6 +2026,13 @@ config MODULE_COMPRESS_XZ
endchoice
+config MODULE_STRIPPED
+ bool "Reduce module size"
+ depends on MODULES
+ help
+ Remove module parameter descriptions, author info, version, aliases,
+ device tables, etc.
+
endif # MODULES
config MODULES_TREE_LOOKUP
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2836,6 +2836,7 @@ static struct module *setup_load_info(st
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
+#ifndef CONFIG_MODULE_STRIPPED
const char *modmagic = get_modinfo(info, "vermagic");
int err;
@@ -2861,6 +2862,7 @@ static int check_modinfo(struct module *
pr_warn("%s: module is from the staging directory, the quality "
"is unknown, you have been warned.\n", mod->name);
}
+#endif
/* Set up license info based on the info section */
set_license(mod, get_modinfo(info, "license"));
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1960,7 +1960,9 @@ static void read_symbols(char *modname)
symname = remove_dot(info.strtab + sym->st_name);
handle_modversions(mod, &info, sym, symname);
+#ifndef CONFIG_MODULE_STRIPPED
handle_moddevtable(mod, &info, sym, symname);
+#endif
}
if (!is_vmlinux(modname) ||
(is_vmlinux(modname) && vmlinux_section_warnings))
@@ -2104,7 +2106,9 @@ static void add_header(struct buffer *b,
buf_printf(b, "#include <linux/vermagic.h>\n");
buf_printf(b, "#include <linux/compiler.h>\n");
buf_printf(b, "\n");
+#ifndef CONFIG_MODULE_STRIPPED
buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
+#endif
buf_printf(b, "\n");
buf_printf(b, "__visible struct module __this_module\n");
buf_printf(b, "__attribute__((section(\".gnu.linkonce.this_module\"))) = {\n");
@@ -2121,16 +2125,20 @@ static void add_header(struct buffer *b,
static void add_intree_flag(struct buffer *b, int is_intree)
{
+#ifndef CONFIG_MODULE_STRIPPED
if (is_intree)
buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
+#endif
}
static void add_staging_flag(struct buffer *b, const char *name)
{
+#ifndef CONFIG_MODULE_STRIPPED
static const char *staging_dir = "drivers/staging";
if (strncmp(staging_dir, name, strlen(staging_dir)) == 0)
buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
+#endif
}
/**
@@ -2223,11 +2231,13 @@ static void add_depends(struct buffer *b
static void add_srcversion(struct buffer *b, struct module *mod)
{
+#ifndef CONFIG_MODULE_STRIPPED
if (mod->srcversion[0]) {
buf_printf(b, "\n");
buf_printf(b, "MODULE_INFO(srcversion, \"%s\");\n",
mod->srcversion);
}
+#endif
}
static void write_if_changed(struct buffer *b, const char *fname)
@@ -2458,7 +2468,9 @@ int main(int argc, char **argv)
add_staging_flag(&buf, mod->name);
err |= add_versions(&buf, mod);
add_depends(&buf, mod, modules);
+#ifndef CONFIG_MODULE_STRIPPED
add_moddevtable(&buf, mod);
+#endif
add_srcversion(&buf, mod);
sprintf(fname, "%s.mod.c", mod->name);
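
As a rough sketch of what the new macro buys (illustrative, not part of the patch; the .modinfo section attribute follows the stock __MODULE_INFO definition, which the hunk above only shows in part, and the __UNIQUE_ID() names and the "Jane Doe" author string are hand-expanded stand-ins), a tag such as MODULE_AUTHOR() ends up either as a string in the module's info section or as an unused empty struct:

/* Normal modular build: the tag becomes a string in the .modinfo section. */
static const char __UNIQUE_ID_author_1[]
        __attribute__((section(".modinfo"), unused, aligned(1)))
        = "author=Jane Doe";

/* CONFIG_MODULE_STRIPPED build: the same MODULE_AUTHOR() invocation goes
 * through __MODULE_INFO_DISABLED and collapses to an empty struct
 * declaration, so no "author=..." string is emitted into the .ko at all. */
struct __UNIQUE_ID_author_2 {};

Multiplied over aliases, parameter descriptions, versions and device tables, this is where the size saving comes from.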

View file

@@ -0,0 +1,36 @@
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -618,8 +618,10 @@ char *symbol_string(char *buf, char *end
struct printf_spec spec, const char *fmt)
{
unsigned long value;
-#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
+#ifndef CONFIG_KALLSYMS
+ struct module *mod;
+ int len;
#endif
if (fmt[1] == 'R')
@@ -633,15 +635,15 @@ char *symbol_string(char *buf, char *end
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
-
- return string(buf, end, sym, spec);
#else
- spec.field_width = 2 * sizeof(void *);
- spec.flags |= SPECIAL | SMALL | ZEROPAD;
- spec.base = 16;
+ len = snprintf(sym, sizeof(sym), "0x%lx", value);
- return number(buf, end, value, spec);
+ mod = __module_address(value);
+ if (mod)
+ snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]",
+ mod->name, mod->module_core, mod->core_size);
#endif
+ return string(buf, end, sym, spec);
}
static noinline_for_stack

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,51 @@
--- a/tools/include/tools/be_byteshift.h
+++ b/tools/include/tools/be_byteshift.h
@@ -1,6 +1,10 @@
#ifndef _TOOLS_BE_BYTESHIFT_H
#define _TOOLS_BE_BYTESHIFT_H
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
#include <stdint.h>
static inline uint16_t __get_unaligned_be16(const uint8_t *p)
--- a/tools/include/tools/le_byteshift.h
+++ b/tools/include/tools/le_byteshift.h
@@ -1,6 +1,10 @@
#ifndef _TOOLS_LE_BYTESHIFT_H
#define _TOOLS_LE_BYTESHIFT_H
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
#include <stdint.h>
static inline uint16_t __get_unaligned_le16(const uint8_t *p)
--- /dev/null
+++ b/tools/include/tools/linux_types.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_TYPES_H
+#define __LINUX_TYPES_H
+
+#include <stdint.h>
+
+typedef uint8_t __u8;
+typedef uint8_t __be8;
+typedef uint8_t __le8;
+
+typedef uint16_t __u16;
+typedef uint16_t __be16;
+typedef uint16_t __le16;
+
+typedef uint32_t __u32;
+typedef uint32_t __be32;
+typedef uint32_t __le32;
+
+typedef uint64_t __u64;
+typedef uint64_t __be64;
+typedef uint64_t __le64;
+
+#endif

View file

@@ -0,0 +1,11 @@
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -111,7 +111,7 @@ struct spi_ioc_transfer {
/* not all platforms use <asm-generic/ioctl.h> or _IOC_TYPECHECK() ... */
#define SPI_MSGSIZE(N) \
- ((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \
+ ((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << 13)) \
? ((N)*(sizeof (struct spi_ioc_transfer))) : 0)
#define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)])

View file

@@ -0,0 +1,536 @@
From: Felix Fietkau <nbd@openwrt.org>
use -ffunction-sections, -fdata-sections and --gc-sections
In combination with kernel symbol export stripping this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -msoft-float
-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
#
# pass -msoft-float to GAS if it supports it. However on newer binutils
# (specifically newer than 2.24.51.20140728) we then also need to explicitly
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}
@@ -112,7 +112,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -89,7 +89,7 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
+ KEEP(*(__mcount_loc)) \
VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
@@ -97,7 +97,7 @@
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
+ KEEP(*(_ftrace_annotated_branch)) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
@@ -105,7 +105,7 @@
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
+ KEEP(*(_ftrace_branch)) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
@@ -114,7 +114,7 @@
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
- *(_kprobe_blacklist) \
+ KEEP(*(_kprobe_blacklist)) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
@@ -123,10 +123,10 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
- *(_ftrace_enum_map) \
+ KEEP(*(_ftrace_enum_map)) \
VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
@@ -134,7 +134,7 @@
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
*(__tracepoint_str) /* Trace_printk fmt' pointer */ \
@@ -147,7 +147,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -169,8 +169,8 @@
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_of_table) = .; \
- *(__##name##_of_table) \
- *(__##name##_of_table_end)
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -184,7 +184,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
@@ -200,16 +200,17 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ *(.data.[a-zA-Z_]*)
/*
* Data section helpers
@@ -263,35 +264,35 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
- *(.pci_fixup_suspend_late) \
+ KEEP(*(.pci_fixup_suspend_late)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -300,49 +301,49 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
@@ -356,14 +357,14 @@
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -382,14 +383,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -443,7 +444,7 @@
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
VMLINUX_SYMBOL(__entry_text_end) = .;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -471,7 +472,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}
@@ -487,9 +488,9 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
+ KEEP(*(.ctors)) \
*(SORT(.init_array.*)) \
- *(.init_array) \
+ KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -540,7 +541,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
*(.scommon) \
}
@@ -558,7 +559,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.*) \
*(COMMON) \
}
@@ -607,7 +608,7 @@
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
}
#else
@@ -619,7 +620,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -636,17 +637,17 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -660,21 +661,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -22,11 +22,16 @@ endif
ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds
endif
+LDFLAGS_vmlinux += --gc-sections
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -15,13 +15,13 @@
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
@@ -102,7 +102,7 @@ SECTIONS
_stext = .; /* Text and read-only data */
IDMAP_TEXT
__exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
__exception_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
@@ -126,7 +126,7 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;
}
@@ -138,12 +138,12 @@ SECTIONS
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
__stop_unwind_tab = .;
}
#endif
@@ -166,14 +166,14 @@ SECTIONS
*/
__vectors_start = .;
.vectors 0 : AT(__vectors_start) {
- *(.vectors)
+ KEEP(*(.vectors))
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;
__stubs_start = .;
.stubs 0x1000 : AT(__stubs_start) {
- *(.stubs)
+ KEEP(*(.stubs))
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
@@ -187,24 +187,24 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
.init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -105,6 +105,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -DZIMAGE
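
A miniature illustration (not part of the patch) of why the KEEP() wrappers above are needed once --gc-sections is in effect: initcall-style tables are referenced only from the linker script, never from C code, so without KEEP() the linker treats their input sections as unreferenced and discards them.

#include <linux/init.h>

/* A driver registers its init function the usual way ... */
static int __init mydrv_init(void)
{
        return 0;
}
device_initcall(mydrv_init);    /* emits a function pointer into .initcall6.init */

/*
 * Nothing in C ever reads .initcall6.init; only do_initcalls() walks the
 * region the linker script lays out between __initcall_start and
 * __initcall_end.  With -ffunction-sections/-fdata-sections plus
 * --gc-sections that section looks unreferenced and would be dropped,
 * silently losing the initcall, unless the script pins it with
 * KEEP(*(.initcall6.init)) as done above.
 */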

View file

@@ -0,0 +1,88 @@
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -54,6 +54,16 @@
#define LOAD_OFFSET 0
#endif
+#ifndef SYMTAB_KEEP
+#define SYMTAB_KEEP KEEP(*(SORT(___ksymtab+*)))
+#define SYMTAB_KEEP_GPL KEEP(*(SORT(___ksymtab_gpl+*)))
+#endif
+
+#ifndef SYMTAB_DISCARD
+#define SYMTAB_DISCARD
+#define SYMTAB_DISCARD_GPL
+#endif
+
#include <linux/export.h>
/* Align . to a 8 byte boundary equals to maximum function alignment. */
@@ -301,14 +311,14 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- KEEP(*(SORT(___ksymtab+*))) \
+ SYMTAB_KEEP \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- KEEP(*(SORT(___ksymtab_gpl+*))) \
+ SYMTAB_KEEP_GPL \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
@@ -370,7 +380,7 @@
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
+ *(__ksymtab_strings+*) \
} \
\
/* __*init sections */ \
@@ -694,6 +704,8 @@
EXIT_TEXT \
EXIT_DATA \
EXIT_CALL \
+ SYMTAB_DISCARD \
+ SYMTAB_DISCARD_GPL \
*(.discard) \
*(.discard.*) \
}
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -299,7 +299,7 @@ targets += $(extra-y) $(MAKECMDGOALS) $(
# Linker scripts preprocessor (.lds.S -> .lds)
# ---------------------------------------------------------------------------
quiet_cmd_cpp_lds_S = LDS $@
- cmd_cpp_lds_S = $(CPP) $(cpp_flags) -P -C -U$(ARCH) \
+ cmd_cpp_lds_S = $(CPP) $(EXTRA_LDSFLAGS) $(cpp_flags) -P -C -U$(ARCH) \
-D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
$(obj)/%.lds: $(src)/%.lds.S FORCE
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -52,12 +52,19 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif
+#ifdef MODULE
+#define __EXPORT_SUFFIX(sym)
+#else
+#define __EXPORT_SUFFIX(sym) "+" #sym
+#endif
+
/* For every exported symbol, place a struct in the __ksymtab section */
#define __EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ __attribute__((section("__ksymtab_strings" \
+ __EXPORT_SUFFIX(sym)), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
extern const struct kernel_symbol __ksymtab_##sym; \
__visible const struct kernel_symbol __ksymtab_##sym \

View file

@@ -0,0 +1,58 @@
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -324,7 +324,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^)
quiet_cmd_lzma = LZMA $@
cmd_lzma = (cat $(filter-out FORCE,$^) | \
- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
(rm -f $@ ; false)
quiet_cmd_lzo = LZO $@
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -226,7 +226,7 @@ cpio_list=
output="/dev/stdout"
output_file=""
is_cpio_compressed=
-compr="gzip -n -9 -f"
+compr="gzip -n -9 -f -"
arg="$1"
case "$arg" in
@@ -242,13 +242,13 @@ case "$arg" in
output=${cpio_list}
echo "$output_file" | grep -q "\.gz$" \
&& [ -x "`which gzip 2> /dev/null`" ] \
- && compr="gzip -n -9 -f"
+ && compr="gzip -n -9 -f -"
echo "$output_file" | grep -q "\.bz2$" \
&& [ -x "`which bzip2 2> /dev/null`" ] \
- && compr="bzip2 -9 -f"
+ && compr="bzip2 -9 -f -"
echo "$output_file" | grep -q "\.lzma$" \
&& [ -x "`which lzma 2> /dev/null`" ] \
- && compr="lzma -9 -f"
+ && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so"
echo "$output_file" | grep -q "\.xz$" \
&& [ -x "`which xz 2> /dev/null`" ] \
&& compr="xz --check=crc32 --lzma2=dict=1MiB"
@@ -315,7 +315,7 @@ if [ ! -z ${output_file} ]; then
if [ "${is_cpio_compressed}" = "compressed" ]; then
cat ${cpio_tfile} > ${output_file}
else
- (cat ${cpio_tfile} | ${compr} - > ${output_file}) \
+ (cat ${cpio_tfile} | ${compr} > ${output_file}) \
|| (rm -f ${output_file} ; false)
fi
[ -z ${cpio_file} ] && rm ${cpio_tfile}
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -48,6 +48,7 @@ static const struct compress_format comp
{ {0x1f, 0x9e}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
+ { {0x6d, 0x00}, "lzma-openwrt", unlzma },
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0x02, 0x21}, "lz4", unlz4 },

View file

@@ -0,0 +1,18 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -218,7 +218,6 @@ config NF_CONNTRACK_FTP
config NF_CONNTRACK_H323
tristate "H.323 protocol support"
- depends on IPV6 || IPV6=n
depends on NETFILTER_ADVANCED
help
H.323 is a VoIP signalling protocol from ITU-T. As one of the most
@@ -929,7 +928,6 @@ config NETFILTER_XT_TARGET_SECMARK
config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support'
- depends on IPV6 || IPV6=n
default m if NETFILTER_ADVANCED=n
---help---
This option adds a `TCPMSS' target, which allows you to alter the

View file

@@ -0,0 +1,18 @@
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -16,13 +16,13 @@ config SND_DMAENGINE_PCM
tristate
config SND_HWDEP
- tristate
+ tristate "Sound hardware support"
config SND_RAWMIDI
tristate
config SND_COMPRESS_OFFLOAD
- tristate
+ tristate "Compression offloading support"
# To be effective this also requires INPUT - users should say:
# select SND_JACK if INPUT=y || INPUT=SND

View file

@@ -0,0 +1,10 @@
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -163,6 +163,7 @@ config CRYPTO_DEV_MV_CESA
tristate "Marvell's Cryptographic Engine"
depends on PLAT_ORION
select CRYPTO_AES
+ select CRYPTO_HASH2
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select SRAM

View file

@@ -0,0 +1,29 @@
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -29,6 +29,7 @@ config SSB_SPROM
config SSB_BLOCKIO
bool
depends on SSB
+ default y
config SSB_PCIHOST_POSSIBLE
bool
@@ -49,7 +50,7 @@ config SSB_PCIHOST
config SSB_B43_PCI_BRIDGE
bool
depends on SSB_PCIHOST
- default n
+ default y
config SSB_PCMCIAHOST_POSSIBLE
bool
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -17,6 +17,7 @@ config BCMA
config BCMA_BLOCKIO
bool
depends on BCMA
+ default y
config BCMA_HOST_PCI_POSSIBLE
bool

View file

@@ -0,0 +1,23 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -331,16 +331,16 @@ config BCH_CONST_T
# Textsearch support is select'ed if needed
#
config TEXTSEARCH
- bool
+ boolean "Textsearch support"
config TEXTSEARCH_KMP
- tristate
+ tristate "Textsearch KMP"
config TEXTSEARCH_BM
- tristate
+ tristate "Textsearch BM"
config TEXTSEARCH_FSM
- tristate
+ tristate "Textsearch FSM"
config BTREE
bool

View file

@@ -0,0 +1,31 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -191,7 +191,7 @@ config CFG80211_WEXT_EXPORT
wext compatibility symbols to be exported.
config LIB80211
- tristate
+ tristate "LIB80211"
default n
help
This options enables a library of common routines used
@@ -200,13 +200,16 @@ config LIB80211
Drivers should select this themselves if needed.
config LIB80211_CRYPT_WEP
- tristate
+ tristate "LIB80211_CRYPT_WEP"
+ select LIB80211
config LIB80211_CRYPT_CCMP
- tristate
+ tristate "LIB80211_CRYPT_CCMP"
+ select LIB80211
config LIB80211_CRYPT_TKIP
- tristate
+ tristate "LIB80211_CRYPT_TKIP"
+ select LIB80211
config LIB80211_DEBUG
bool "lib80211 debugging messages"

View file

@@ -0,0 +1,47 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -32,7 +32,7 @@ config CRYPTO_FIPS
this is.
config CRYPTO_ALGAPI
- tristate
+ tristate "ALGAPI"
select CRYPTO_ALGAPI2
help
This option provides the API for cryptographic algorithms.
@@ -41,7 +41,7 @@ config CRYPTO_ALGAPI2
tristate
config CRYPTO_AEAD
- tristate
+ tristate "AEAD"
select CRYPTO_AEAD2
select CRYPTO_ALGAPI
@@ -52,7 +52,7 @@ config CRYPTO_AEAD2
select CRYPTO_RNG2
config CRYPTO_BLKCIPHER
- tristate
+ tristate "BLKCIPHER"
select CRYPTO_BLKCIPHER2
select CRYPTO_ALGAPI
@@ -63,7 +63,7 @@ config CRYPTO_BLKCIPHER2
select CRYPTO_WORKQUEUE
config CRYPTO_HASH
- tristate
+ tristate "HASH"
select CRYPTO_HASH2
select CRYPTO_ALGAPI
@@ -72,7 +72,7 @@ config CRYPTO_HASH2
select CRYPTO_ALGAPI2
config CRYPTO_RNG
- tristate
+ tristate "RNG"
select CRYPTO_RNG2
select CRYPTO_ALGAPI

View file

@@ -0,0 +1,22 @@
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,5 @@
config WIRELESS_EXT
- bool
+ bool "Wireless extensions"
config WEXT_CORE
def_bool y
@@ -11,10 +11,10 @@ config WEXT_PROC
depends on WEXT_CORE
config WEXT_SPY
- bool
+ bool "WEXT_SPY"
config WEXT_PRIV
- bool
+ bool "WEXT_PRIV"
config CFG80211
tristate "cfg80211 - wireless configuration API"

View file

@@ -0,0 +1,11 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -10,7 +10,7 @@ config NETFILTER_INGRESS
infrastructure.
config NETFILTER_NETLINK
- tristate
+ tristate "Netfilter NFNETLINK interface"
config NETFILTER_NETLINK_ACCT
tristate "Netfilter NFACCT over NFNETLINK interface"

View file

@@ -0,0 +1,87 @@
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,29 +3,35 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
- bool
+ tristate "Regmap"
config REGMAP_AC97
+ select REGMAP
tristate
config REGMAP_I2C
- tristate
+ tristate "Regmap I2C"
+ select REGMAP
depends on I2C
config REGMAP_SPI
- tristate
+ tristate "Regmap SPI"
+ select REGMAP
+ depends on SPI_MASTER
depends on SPI
config REGMAP_SPMI
+ select REGMAP
tristate
depends on SPMI
config REGMAP_MMIO
- tristate
+ tristate "Regmap MMIO"
+ select REGMAP
config REGMAP_IRQ
+ select REGMAP
bool
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -65,7 +65,7 @@ struct reg_sequence {
unsigned int delay_us;
};
-#ifdef CONFIG_REGMAP
+#if IS_ENABLED(CONFIG_REGMAP)
enum regmap_endian {
/* Unspecified -> 0 -> Backwards compatible default */
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,9 +1,11 @@
# For include/trace/define_trace.h to include trace.h
CFLAGS_regmap.o := -I$(src)
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
-obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+regmap-core-objs = regmap.o regcache.o regcache-rbtree.o regcache-lzo.o regcache-flat.o
+ifdef CONFIG_DEBUG_FS
+regmap-core-objs += regmap-debugfs.o
+endif
+obj-$(CONFIG_REGMAP) += regmap-core.o
obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
@@ -2852,3 +2853,5 @@ static int __init regmap_initcall(void)
return 0;
}
postcore_initcall(regmap_initcall);
+
+MODULE_LICENSE("GPL");

View file

@@ -0,0 +1,38 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -119,11 +119,11 @@ config CRYPTO_MANAGER
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
- select CRYPTO_AEAD2
- select CRYPTO_HASH2
- select CRYPTO_BLKCIPHER2
- select CRYPTO_PCOMP2
- select CRYPTO_AKCIPHER2
+ select CRYPTO_AEAD2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_HASH2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_PCOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -248,12 +248,16 @@ static int cryptomgr_schedule_test(struc
type = alg->cra_flags;
/* This piece of crap needs to disappear into per-type test hooks. */
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ type |= CRYPTO_ALG_TESTED;
+#else
if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
alg->cra_ablkcipher.ivsize))
type |= CRYPTO_ALG_TESTED;
+#endif
param->type = type;

View file

@@ -0,0 +1,35 @@
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -216,25 +216,25 @@ config 842_DECOMPRESS
tristate
config ZLIB_INFLATE
- tristate
+ tristate "ZLIB inflate support"
config ZLIB_DEFLATE
- tristate
+ tristate "ZLIB deflate support"
config LZO_COMPRESS
- tristate
+ tristate "LZO compress support"
config LZO_DECOMPRESS
- tristate
+ tristate "LZO decompress support"
config LZ4_COMPRESS
- tristate
+ tristate "LZ4 compress support"
config LZ4HC_COMPRESS
- tristate
+ tristate "LZ4HC compress support"
config LZ4_DECOMPRESS
- tristate
+ tristate "LZ4 decompress support"
source "lib/xz/Kconfig"

View file

@@ -0,0 +1,34 @@
From 8b05e325824d3b38e52a7748b3b5dc34dc1c0f6d Mon Sep 17 00:00:00 2001
From: David Heidelberger <david.heidelberger@ixit.cz>
Date: Mon, 29 Jun 2015 14:37:54 +0200
Subject: [PATCH 1/3] uapi/kernel.h: glibc specific inclusion of sysinfo.h
Including sysinfo.h from kernel.h makes no sense whatsoever,
but removing it breaks glibc's userspace header,
which includes kernel.h instead of sysinfo.h from its sys/sysinfo.h.
This seems to be a historical mistake.
On musl, including any header that uses kernel.h directly or indirectly
together with sys/sysinfo.h produces a compile error due to redefinition of
struct sysinfo from sys/sysinfo.h.
So for now, only include it on glibc or when including from the kernel,
in order not to break glibc's headers.
Signed-off-by: John Spencer <maillist-linux@barfooze.de>
Signed-off-by: David Heidelberger <david.heidelberger@ixit.cz>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
include/uapi/linux/kernel.h | 2 ++
1 file changed, 2 insertions(+)
--- a/include/uapi/linux/kernel.h
+++ b/include/uapi/linux/kernel.h
@@ -1,7 +1,9 @@
#ifndef _UAPI_LINUX_KERNEL_H
#define _UAPI_LINUX_KERNEL_H
+#if defined(__KERNEL__) || defined( __GLIBC__)
#include <linux/sysinfo.h>
+#endif
/*
* 'kernel.h' contains some often-used function prototypes etc
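
To make the failure mode concrete, here is an illustrative userspace snippet (not part of the patch) of the musl build break described above; with this change applied, linux/kernel.h no longer drags in linux/sysinfo.h on musl, so the same file compiles:

#include <sys/sysinfo.h>        /* musl defines struct sysinfo here           */
#include <linux/kernel.h>       /* ...and used to pull in linux/sysinfo.h too */

int main(void)
{
        /* before this patch, on musl: error: redefinition of 'struct sysinfo' */
        return 0;
}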

View file

@@ -0,0 +1,81 @@
From f972afc2509eebcb00d370256c55b112a3b5ffca Mon Sep 17 00:00:00 2001
From: David Heidelberger <david.heidelberger@ixit.cz>
Date: Mon, 29 Jun 2015 16:50:40 +0200
Subject: [PATCH 2/3] uapi/libc-compat.h: do not rely on __GLIBC__
Musl provides the same structs as glibc, but does not provide a define to
allow its detection. Since the absence of __GLIBC__ can also mean that it
is included from the kernel, change the __GLIBC__ detection to
!__KERNEL__, which should always be true when included from userspace.
Signed-off-by: John Spencer <maillist-linux@barfooze.de>
Tested-by: David Heidelberger <david.heidelberger@ixit.cz>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
include/uapi/linux/libc-compat.h | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -48,13 +48,13 @@
#ifndef _UAPI_LIBC_COMPAT_H
#define _UAPI_LIBC_COMPAT_H
-/* We have included glibc headers... */
-#if defined(__GLIBC__)
+/* We have included libc headers... */
+#if !defined(__KERNEL__)
-/* Coordinate with glibc netinet/in.h header. */
+/* Coordinate with libc netinet/in.h header. */
#if defined(_NETINET_IN_H)
-/* GLIBC headers included first so don't define anything
+/* LIBC headers included first so don't define anything
* that would already be defined. */
#define __UAPI_DEF_IN_ADDR 0
#define __UAPI_DEF_IN_IPPROTO 0
@@ -68,7 +68,7 @@
* if the glibc code didn't define them. This guard matches
* the guard in glibc/inet/netinet/in.h which defines the
* additional in6_addr macros e.g. s6_addr16, and s6_addr32. */
-#if defined(__USE_MISC) || defined (__USE_GNU)
+#if !defined(__GLIBC__) || defined(__USE_MISC) || defined (__USE_GNU)
#define __UAPI_DEF_IN6_ADDR_ALT 0
#else
#define __UAPI_DEF_IN6_ADDR_ALT 1
@@ -83,7 +83,7 @@
#else
/* Linux headers included first, and we must define everything
- * we need. The expectation is that glibc will check the
+ * we need. The expectation is that the libc will check the
* __UAPI_DEF_* defines and adjust appropriately. */
#define __UAPI_DEF_IN_ADDR 1
#define __UAPI_DEF_IN_IPPROTO 1
@@ -93,7 +93,7 @@
#define __UAPI_DEF_IN_CLASS 1
#define __UAPI_DEF_IN6_ADDR 1
-/* We unconditionally define the in6_addr macros and glibc must
+/* We unconditionally define the in6_addr macros and the libc must
* coordinate. */
#define __UAPI_DEF_IN6_ADDR_ALT 1
#define __UAPI_DEF_SOCKADDR_IN6 1
@@ -115,7 +115,7 @@
/* If we did not see any headers from any supported C libraries,
* or we are being included in the kernel, then define everything
* that we need. */
-#else /* !defined(__GLIBC__) */
+#else /* defined(__KERNEL__) */
/* Definitions for in.h */
#define __UAPI_DEF_IN_ADDR 1
@@ -138,6 +138,6 @@
/* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1
-#endif /* __GLIBC__ */
+#endif /* __KERNEL__ */
#endif /* _UAPI_LIBC_COMPAT_H */

View file

@@ -0,0 +1,67 @@
From fcbb6fed85ea9ff4feb4f1ebd4f0f235fdaf06b6 Mon Sep 17 00:00:00 2001
From: David Heidelberger <david.heidelberger@ixit.cz>
Date: Mon, 29 Jun 2015 16:53:03 +0200
Subject: [PATCH 3/3] uapi/if_ether.h: prevent redefinition of struct ethhdr
Musl provides its own ethhdr struct definition. Add a guard to prevent
its definition if the appropriate musl header has already been included.
Signed-off-by: John Spencer <maillist-linux@barfooze.de>
Tested-by: David Heidelberger <david.heidelberger@ixit.cz>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
include/uapi/linux/if_ether.h | 3 +++
include/uapi/linux/libc-compat.h | 11 +++++++++++
2 files changed, 14 insertions(+)
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -22,6 +22,7 @@
#define _UAPI_LINUX_IF_ETHER_H
#include <linux/types.h>
+#include <linux/libc-compat.h>
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -135,11 +136,13 @@
* This is an Ethernet frame header.
*/
+#if __UAPI_DEF_ETHHDR
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
+#endif
#endif /* _UAPI_LINUX_IF_ETHER_H */
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -51,6 +51,14 @@
/* We have included libc headers... */
#if !defined(__KERNEL__)
+/* musl defines the ethhdr struct itself in its netinet/if_ether.h.
+ * Glibc just includes the kernel header and uses a different guard. */
+#if defined(_NETINET_IF_ETHER_H)
+#define __UAPI_DEF_ETHHDR 0
+#else
+#define __UAPI_DEF_ETHHDR 1
+#endif
+
/* Coordinate with libc netinet/in.h header. */
#if defined(_NETINET_IN_H)
@@ -117,6 +125,9 @@
* that we need. */
#else /* defined(__KERNEL__) */
+/* Definitions for if_ether.h */
+#define __UAPI_DEF_ETHHDR 1
+
/* Definitions for in.h */
#define __UAPI_DEF_IN_ADDR 1
#define __UAPI_DEF_IN_IPPROTO 1
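
An illustrative userspace snippet (not part of the patch): with musl's netinet/if_ether.h included first, _NETINET_IF_ETHER_H is defined, __UAPI_DEF_ETHHDR evaluates to 0, and the kernel header no longer redefines struct ethhdr, so the two headers can now be mixed:

#include <netinet/if_ether.h>   /* musl's own struct ethhdr                     */
#include <linux/if_ether.h>     /* skips its definition: __UAPI_DEF_ETHHDR == 0 */

int main(void)
{
        struct ethhdr hdr = { .h_proto = 0 };

        return (int)hdr.h_proto;
}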

View file

@@ -0,0 +1,39 @@
From: Mark Miller <mark@mirell.org>
This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
certain Broadcom chipsets running CFE in order to load the kernel.
Signed-off-by: Mark Miller <mark@mirell.org>
Acked-by: Rob Landley <rob@landley.net>
---
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1003,9 +1003,6 @@ config FW_ARC
config ARCH_MAY_HAVE_PC_FDC
bool
-config BOOT_RAW
- bool
-
config CEVT_BCM1480
bool
@@ -2733,6 +2730,18 @@ choice
if you don't intend to always append a DTB.
endchoice
+config BOOT_RAW
+ bool "Enable the kernel to be executed from the load address"
+ default n
+ help
+ Allow the kernel to be executed from the load address for
+ bootloaders which cannot read the ELF format. This places
+ a jump to start_kernel at the load address.
+
+ If unsure, say N.
+
+
+
endmenu
config LOCKDEP_SUPPORT

View file

@@ -0,0 +1,28 @@
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1091,6 +1091,10 @@ config SYNC_R4K
config MIPS_MACHINE
def_bool n
+config IMAGE_CMDLINE_HACK
+ bool "OpenWrt specific image command line hack"
+ default n
+
config NO_IOPORT_MAP
def_bool n
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -80,6 +80,12 @@ FEXPORT(__kernel_entry)
j kernel_entry
#endif
+#ifdef CONFIG_IMAGE_CMDLINE_HACK
+ .ascii "CMDLINE:"
+EXPORT(__image_cmdline)
+ .fill 0x400
+#endif /* CONFIG_IMAGE_CMDLINE_HACK */
+
__REF
NESTED(kernel_entry, 16, sp) # kernel entry point

View file

@@ -0,0 +1,11 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -87,7 +87,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
# machines may also. Since BFD is incredibly buggy with respect to
# crossformat linking we rely on the elf2ecoff tool for format conversion.
#
-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
+cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
cflags-y += -msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
KBUILD_AFLAGS_MODULE += -mlong-calls

View file

@@ -0,0 +1,106 @@
From: Manuel Lauss <manuel.lauss@gmail.com>
Subject: [RFC PATCH v4 2/2] MIPS: make FPU emulator optional
Date: Mon, 7 Apr 2014 12:57:04 +0200
Message-Id: <1396868224-252888-2-git-send-email-manuel.lauss@gmail.com>
This small patch makes the MIPS FPU emulator optional. The kernel
kills float-users on systems without a hardware FPU by sending a SIGILL.
Disabling the emulator shrinks vmlinux by about 54kBytes (32bit,
optimizing for size).
Signed-off-by: Manuel Lauss <manuel.lauss@gmail.com>
---
v4: rediffed because of patch 1/2, should now work with micromips as well
v3: updated patch description with size savings.
v2: incorporated changes suggested by Jonas Gorski
force the fpu emulator on for micromips: relocating the parts
of the mmips code in the emulator to other areas would be a
much larger change; I went the cheap route instead with this.
arch/mips/Kbuild | 2 +-
arch/mips/Kconfig | 14 ++++++++++++++
arch/mips/include/asm/fpu.h | 5 +++--
arch/mips/include/asm/fpu_emulator.h | 15 +++++++++++++++
4 files changed, 33 insertions(+), 3 deletions(-)
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2680,6 +2680,20 @@ config MIPS_O32_FP64_SUPPORT
If unsure, say N.
+config MIPS_FPU_EMULATOR
+ bool "MIPS FPU Emulator"
+ default y
+ help
+ This option lets you disable the built-in MIPS FPU (Coprocessor 1)
+ emulator, which handles floating-point instructions on processors
+ without a hardware FPU. It is generally a good idea to keep the
+ emulator built-in, unless you are perfectly sure you have a
+ complete soft-float environment. With the emulator disabled, all
+ users of float operations will be killed with an illegal instruction
+ exception.
+
+ Say Y, please.
+
config USE_OF
bool
select OF
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -285,7 +285,7 @@ OBJCOPYFLAGS += --remove-section=.regin
head-y := arch/mips/kernel/head.o
libs-y += arch/mips/lib/
-libs-y += arch/mips/math-emu/
+libs-$(CONFIG_MIPS_FPU_EMULATOR) += arch/mips/math-emu/
# See arch/mips/Kbuild for content of core part of the kernel
core-y += arch/mips/
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -223,8 +223,10 @@ static inline int init_fpu(void)
/* Restore FRE */
write_c0_config5(config5);
enable_fpu_hazard();
- } else
+ } else if (IS_ENABLED(CONFIG_MIPS_FPU_EMULATOR))
fpu_emulator_init_fpu();
+ else
+ ret = SIGILL;
return ret;
}
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -30,6 +30,7 @@
#include <asm/local.h>
#include <asm/processor.h>
+#ifdef CONFIG_MIPS_FPU_EMULATOR
#ifdef CONFIG_DEBUG_FS
struct mips_fpu_emulator_stats {
@@ -66,6 +67,21 @@ extern int do_dsemulret(struct pt_regs *
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
+#else /* no CONFIG_MIPS_FPU_EMULATOR */
+static inline int do_dsemulret(struct pt_regs *xcp)
+{
+ return 0; /* 0 means error, should never get here anyway */
+}
+
+static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+ struct mips_fpu_struct *ctx, int has_fpu,
+ void *__user *fault_addr)
+{
+ *fault_addr = NULL;
+ return SIGILL; /* we don't speak MIPS FPU */
+}
+#endif /* CONFIG_MIPS_FPU_EMULATOR */
+
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
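
As an illustration of the behaviour described above (not part of the patch): on an FPU-less board running a kernel with CONFIG_MIPS_FPU_EMULATOR=n, a program built for the hard-float ABI dies with SIGILL at its first floating-point instruction instead of being transparently emulated.

#include <stdio.h>

int main(void)
{
        volatile double x = 1.5;

        /* Built hard-float, this multiply emits a real FPU instruction; with
         * no hardware FPU and the emulator compiled out, the kernel answers
         * the resulting trap with SIGILL and the process is killed. */
        printf("%f\n", x * 2.0);
        return 0;
}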

View file

@@ -0,0 +1,352 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -90,8 +90,13 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
cflags-y += -msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
+ifdef CONFIG_64BIT
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+else
+KBUILD_AFLAGS_MODULE += -mno-long-calls
+KBUILD_CFLAGS_MODULE += -mno-long-calls
+endif
ifndef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -11,6 +11,11 @@ struct mod_arch_specific {
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
struct mips_hi16 *r_mips_hi16_list;
+
+ void *phys_plt_tbl;
+ void *virt_plt_tbl;
+ unsigned int phys_plt_offset;
+ unsigned int virt_plt_offset;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -43,14 +43,221 @@ struct mips_hi16 {
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
-#ifdef MODULE_START
+/*
+ * Get the potential max trampolines size required of the init and
+ * non-init sections. Only used if we cannot find enough contiguous
+ * physically mapped memory to put the module into.
+ */
+static unsigned int
+get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ const char *secstrings, unsigned int symindex, bool is_init)
+{
+ unsigned long ret = 0;
+ unsigned int i, j;
+ Elf_Sym *syms;
+
+ /* Everything marked ALLOC (this includes the exported symbols) */
+ for (i = 1; i < hdr->e_shnum; ++i) {
+ unsigned int info = sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_REL
+ && sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ /* If it's called *.init*, and we're not init, we're
+ not interested */
+ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
+ != is_init)
+ continue;
+
+ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
+ if (sechdrs[i].sh_type == SHT_REL) {
+ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rel[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ } else {
+ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
+ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
+
+ for (j = 0; j < size; ++j) {
+ Elf_Sym *sym;
+
+ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
+ continue;
+
+ sym = syms + ELF_MIPS_R_SYM(rela[j]);
+ if (!is_init && sym->st_shndx != SHN_UNDEF)
+ continue;
+
+ ret += 4 * sizeof(int);
+ }
+ }
+ }
+
+ return ret;
+}
+
+#ifndef MODULE_START
+static void *alloc_phys(unsigned long size)
+{
+ unsigned order;
+ struct page *page;
+ struct page *p;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_THISNODE, order);
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+
+ /* mark all pages except for the last one */
+ for (p = page; p + 1 < page + (size >> PAGE_SHIFT); ++p)
+ set_bit(PG_owner_priv_1, &p->flags);
+
+ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
+ __free_page(p);
+
+ return page_address(page);
+}
+#endif
+
+static void free_phys(void *ptr)
+{
+ struct page *page;
+ bool free;
+
+ page = virt_to_page(ptr);
+ do {
+ free = test_and_clear_bit(PG_owner_priv_1, &page->flags);
+ __free_page(page);
+ page++;
+ } while (free);
+}
+
+
void *module_alloc(unsigned long size)
{
+#ifdef MODULE_START
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
+#else
+ void *ptr;
+
+ if (size == 0)
+ return NULL;
+
+ ptr = alloc_phys(size);
+
+ /* If we failed to allocate physically contiguous memory,
+ * fall back to regular vmalloc. The module loader code will
+ * create jump tables to handle long jumps */
+ if (!ptr)
+ return vmalloc(size);
+
+ return ptr;
+#endif
}
+
+static inline bool is_phys_addr(void *ptr)
+{
+#ifdef CONFIG_64BIT
+ return (KSEGX((unsigned long)ptr) == CKSEG0);
+#else
+ return (KSEGX(ptr) == KSEG0);
#endif
+}
+
+/* Free memory returned from module_alloc */
+void module_memfree(void *module_region)
+{
+ if (is_phys_addr(module_region))
+ free_phys(module_region);
+ else
+ vfree(module_region);
+}
+
+static void *__module_alloc(int size, bool phys)
+{
+ void *ptr;
+
+ if (phys)
+ ptr = kmalloc(size, GFP_KERNEL);
+ else
+ ptr = vmalloc(size);
+ return ptr;
+}
+
+static void __module_free(void *ptr)
+{
+ if (is_phys_addr(ptr))
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int symindex = 0;
+ unsigned int core_size, init_size;
+ int i;
+
+ mod->arch.phys_plt_offset = 0;
+ mod->arch.virt_plt_offset = 0;
+ mod->arch.phys_plt_tbl = NULL;
+ mod->arch.virt_plt_tbl = NULL;
+
+ if (IS_ENABLED(CONFIG_64BIT))
+ return 0;
+
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (sechdrs[i].sh_type == SHT_SYMTAB)
+ symindex = i;
+
+ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
+ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
+
+ if ((core_size + init_size) == 0)
+ return 0;
+
+ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
+ if (!mod->arch.phys_plt_tbl)
+ return -ENOMEM;
+
+ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
+ if (!mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
@@ -64,8 +271,39 @@ static int apply_r_mips_32_rel(struct mo
return 0;
}
+static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
+ void *start, Elf_Addr v)
+{
+ unsigned *tramp = start + *plt_offset;
+ *plt_offset += 4 * sizeof(int);
+
+ /* adjust carry for addiu */
+ if (v & 0x00008000)
+ v += 0x10000;
+
+ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
+ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
+ tramp[2] = 0x03200008; /* jr t9 */
+ tramp[3] = 0x00000000; /* nop */
+
+ return (Elf_Addr) tramp;
+}
+
+static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
+{
+ if (is_phys_addr(location))
+ return add_plt_entry_to(&me->arch.phys_plt_offset,
+ me->arch.phys_plt_tbl, v);
+ else
+ return add_plt_entry_to(&me->arch.virt_plt_offset,
+ me->arch.virt_plt_tbl, v);
+
+}
+
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
+ u32 ofs = *location & 0x03ffffff;
+
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
me->name);
@@ -73,14 +311,17 @@ static int apply_r_mips_26_rel(struct mo
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
- return -ENOEXEC;
+ v = add_plt_entry(me, location, v + (ofs << 2));
+ if (!v) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n", me->name);
+ return -ENOEXEC;
+ }
+ ofs = 0;
}
*location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ ((ofs + (v >> 2)) & 0x03ffffff);
return 0;
}
@@ -287,9 +528,33 @@ int module_finalize(const Elf_Ehdr *hdr,
list_add(&me->arch.dbe_list, &dbe_list);
spin_unlock_irq(&dbe_lock);
}
+
+ /* Get rid of the fixup trampoline if we're running the module
+ * from physically mapped address space */
+ if (me->arch.phys_plt_offset == 0) {
+ __module_free(me->arch.phys_plt_tbl);
+ me->arch.phys_plt_tbl = NULL;
+ }
+ if (me->arch.virt_plt_offset == 0) {
+ __module_free(me->arch.virt_plt_tbl);
+ me->arch.virt_plt_tbl = NULL;
+ }
+
return 0;
}
+void module_arch_freeing_init(struct module *mod)
+{
+ if (mod->arch.phys_plt_tbl) {
+ __module_free(mod->arch.phys_plt_tbl);
+ mod->arch.phys_plt_tbl = NULL;
+ }
+ if (mod->arch.virt_plt_tbl) {
+ __module_free(mod->arch.virt_plt_tbl);
+ mod->arch.virt_plt_tbl = NULL;
+ }
+}
+
void module_arch_cleanup(struct module *mod)
{
spin_lock_irq(&dbe_lock);

View file

@ -0,0 +1,83 @@
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -133,11 +133,44 @@ strncmp(__const__ char *__cs, __const__
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+#define memset(__s, __c, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memset((__s), (__c), __len); \
+ else \
+ __ret = __builtin_memset((__s), (__c), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+#define memcpy(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memcpy((dst), (src), __len); \
+ else \
+ __ret = __builtin_memcpy((dst), (src), __len); \
+ __ret; \
+})
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+#define memmove(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = memmove((dst), (src), __len); \
+ else \
+ __ret = __builtin_memmove((dst), (src), __len); \
+ __ret; \
+})
+
+#define __HAVE_ARCH_MEMCMP
+#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
#endif /* _ASM_STRING_H */
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -4,7 +4,7 @@
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strlen_user.o strncpy_user.o \
- strnlen_user.o uncached.o
+ strnlen_user.o uncached.o memcmp.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
--- /dev/null
+++ b/arch/mips/lib/memcmp.c
@@ -0,0 +1,22 @@
+/*
+ * copied from linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#undef memcmp
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
+EXPORT_SYMBOL(memcmp);
+

View file

@ -0,0 +1,17 @@
Adjust highmem offset to 0x10000000 to ensure that all kmalloc allocations
stay within the same 256M boundary. This ensures that -mlong-calls is not
needed on systems with more than 256M RAM.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -44,7 +44,7 @@
* Memory above this physical address will be considered highmem.
*/
#ifndef HIGHMEM_START
-#define HIGHMEM_START _AC(0x20000000, UL)
+#define HIGHMEM_START _AC(0x10000000, UL)
#endif
#endif /* CONFIG_32BIT */
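A minimal sketch (not part of this patch set) of why the 256M boundary matters: MIPS j/jal instructions carry only a 26-bit word index, so a direct call can only reach a target whose top four address bits match those of the instruction in the delay slot -- the same 0xf0000000 mask the R_MIPS_26 module relocation handler checks earlier in this series. The helper and the sample addresses below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the kernel's R_MIPS_26 overflow test:
 * a direct jump at 'caller' can reach 'callee' only when both lie in
 * the same 256 MiB segment (upper four address bits identical). */
static bool same_jump_segment(uintptr_t caller, uintptr_t callee)
{
	return ((caller + 4) & 0xf0000000UL) == (callee & 0xf0000000UL);
}

int main(void)
{
	/* kernel text vs. a kmalloc'd buffer in the low 256 MiB: reachable */
	printf("%d\n", same_jump_segment(0x80100000UL, 0x8f000000UL)); /* 1 */
	/* kernel text vs. a vmalloc/MODULE_START address: needs a trampoline */
	printf("%d\n", same_jump_segment(0x80100000UL, 0xc0080000UL)); /* 0 */
	return 0;
}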

View file

@ -0,0 +1,32 @@
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -38,6 +38,7 @@ void (*__flush_cache_vunmap)(void);
void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+EXPORT_SYMBOL(__flush_cache_all);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
/* MIPS specific cache operations */
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,6 +19,9 @@
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
+#ifdef CONFIG_MIPS
+#include <asm/cacheflush.h>
+#endif
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -802,6 +805,9 @@ static int fuse_copy_fill(struct fuse_co
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
unsigned ncpy = min(*size, cs->len);
+#ifdef CONFIG_MIPS
+ __flush_cache_all();
+#endif
if (val) {
void *pgaddr = kmap_atomic(cs->pg);
void *buf = pgaddr + cs->offset;

View file

@ -0,0 +1,13 @@
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -88,6 +88,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
return -ENOEXEC;
}
+ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
+ ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+
loc = dstsec->sh_addr + rel->r_offset;
switch (ELF32_R_TYPE(rel->r_info)) {

View file

@ -0,0 +1,31 @@
Upstream doesn't optimize the kernel and bootwrappers for ppc44x because
they still want to support gcc 3.3 -- well, we don't.
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -207,7 +207,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
KBUILD_CFLAGS += -mno-sched-epilog
endif
-cpu-as-$(CONFIG_4xx) += -Wa,-m405
+cpu-as-$(CONFIG_40x) += -Wa,-m405
+cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
cpu-as-$(CONFIG_E200) += -Wa,-me200
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -48,10 +48,10 @@ BOOTCFLAGS += -I$(obj) -I$(srctree)/$(ob
DTC_FLAGS ?= -p 1024
$(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
+$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
+$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
+$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405

View file

@ -0,0 +1,10 @@
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -169,7 +169,6 @@ CPP = $(CC) -E $(KBUILD_CFLAGS)
CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
ifeq ($(CONFIG_476FPE_ERR46),y)
KBUILD_LDFLAGS_MODULE += --ppc476-workaround \

View file

@ -0,0 +1,298 @@
From d8582dcf1ed66eee88a11e4760f42c0d6c8822be Mon Sep 17 00:00:00 2001
From: Yousong Zhou <yszhou4tech@gmail.com>
Date: Sat, 31 Jan 2015 22:26:03 +0800
Subject: [PATCH 331/331] MIPS: kexec: Accept command line parameters from
userspace.
Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com>
---
arch/mips/kernel/machine_kexec.c | 153 +++++++++++++++++++++++++++++++-----
arch/mips/kernel/machine_kexec.h | 20 +++++
arch/mips/kernel/relocate_kernel.S | 21 +++--
3 files changed, 167 insertions(+), 27 deletions(-)
create mode 100644 arch/mips/kernel/machine_kexec.h
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -10,45 +10,145 @@
#include <linux/mm.h>
#include <linux/delay.h>
+#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
-
-extern const unsigned char relocate_new_kernel[];
-extern const size_t relocate_new_kernel_size;
-
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
+#include <asm/uaccess.h>
+#include "machine_kexec.h"
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+
#ifdef CONFIG_SMP
void (*relocated_kexec_smp_wait) (void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
-int
-machine_kexec_prepare(struct kimage *kimage)
+static void machine_kexec_print_args(void)
{
+ unsigned long argc = (int)kexec_args[0];
+ int i;
+
+ pr_info("kexec_args[0] (argc): %lu\n", argc);
+ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
+ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
+ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
+
+ for (i = 0; i < argc; i++) {
+ pr_info("kexec_argv[%d] = %p, %s\n",
+ i, kexec_argv[i], kexec_argv[i]);
+ }
+}
+
+static void machine_kexec_init_argv(struct kimage *image)
+{
+ void __user *buf = NULL;
+ size_t bufsz;
+ size_t size;
+ int i;
+
+ bufsz = 0;
+ for (i = 0; i < image->nr_segments; i++) {
+ struct kexec_segment *seg;
+
+ seg = &image->segment[i];
+ if (seg->bufsz < 6)
+ continue;
+
+ if (strncmp((char *) seg->buf, "kexec ", 6))
+ continue;
+
+ buf = seg->buf;
+ bufsz = seg->bufsz;
+ break;
+ }
+
+ if (!buf)
+ return;
+
+ size = KEXEC_COMMAND_LINE_SIZE;
+ size = min(size, bufsz);
+ if (size < bufsz)
+ pr_warn("kexec command line truncated to %zd bytes\n", size);
+
+ /* Copy to kernel space */
+ copy_from_user(kexec_argv_buf, buf, size);
+ kexec_argv_buf[size - 1] = 0;
+}
+
+static void machine_kexec_parse_argv(struct kimage *image)
+{
+ char *reboot_code_buffer;
+ int reloc_delta;
+ char *ptr;
+ int argc;
+ int i;
+
+ ptr = kexec_argv_buf;
+ argc = 0;
+
+ /*
+ * convert command line string to array of parameters
+ * (as bootloader does).
+ */
+ while (ptr && *ptr && (KEXEC_MAX_ARGC > argc)) {
+ if (*ptr == ' ') {
+ *ptr++ = '\0';
+ continue;
+ }
+
+ kexec_argv[argc++] = ptr;
+ ptr = strchr(ptr, ' ');
+ }
+
+ if (!argc)
+ return;
+
+ kexec_args[0] = argc;
+ kexec_args[1] = (unsigned long)kexec_argv;
+ kexec_args[2] = 0;
+ kexec_args[3] = 0;
+
+ reboot_code_buffer = page_address(image->control_code_page);
+ reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel;
+
+ kexec_args[1] += reloc_delta;
+ for (i = 0; i < argc; i++)
+ kexec_argv[i] += reloc_delta;
+}
+
+int machine_kexec_prepare(struct kimage *kimage)
+{
+ /*
+ * Initialize the arguments to the original firmware values first, so
+ * that booting still works if kexec-tools did not pass a command line;
+ * a "kexec " segment, if present, overrides them below.
+ */
+
+ kexec_args[0] = fw_arg0;
+ kexec_args[1] = fw_arg1;
+ kexec_args[2] = fw_arg2;
+ kexec_args[3] = fw_arg3;
+
+ machine_kexec_init_argv(kimage);
+ machine_kexec_parse_argv(kimage);
+
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);
return 0;
}
-void
-machine_kexec_cleanup(struct kimage *kimage)
+void machine_kexec_cleanup(struct kimage *kimage)
{
}
-void
-machine_shutdown(void)
+void machine_shutdown(void)
{
if (_machine_kexec_shutdown)
_machine_kexec_shutdown();
}
-void
-machine_crash_shutdown(struct pt_regs *regs)
+void machine_crash_shutdown(struct pt_regs *regs)
{
if (_machine_crash_shutdown)
_machine_crash_shutdown(regs);
@@ -66,10 +166,12 @@ machine_kexec(struct kimage *image)
unsigned long *ptr;
reboot_code_buffer =
- (unsigned long)page_address(image->control_code_page);
+ (unsigned long)page_address(image->control_code_page);
+ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
kexec_start_address =
(unsigned long) phys_to_virt(image->start);
+ pr_info("kexec_start_address = %p\n", (void *)kexec_start_address);
if (image->type == KEXEC_TYPE_DEFAULT) {
kexec_indirection_page =
@@ -77,9 +179,19 @@ machine_kexec(struct kimage *image)
} else {
kexec_indirection_page = (unsigned long)&image->head;
}
+ pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page);
- memcpy((void*)reboot_code_buffer, relocate_new_kernel,
- relocate_new_kernel_size);
+ pr_info("Where is memcpy: %p\n", memcpy);
+ pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n",
+ (void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end);
+ pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE,
+ (void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer);
+ memcpy((void*)reboot_code_buffer, kexec_relocate_new_kernel,
+ KEXEC_RELOCATE_NEW_KERNEL_SIZE);
+
+ pr_info("Before _print_args().\n");
+ machine_kexec_print_args();
+ pr_info("Before eval loop.\n");
/*
* The generic kexec code builds a page list with physical
@@ -98,15 +210,16 @@ machine_kexec(struct kimage *image)
/*
* we do not want to be bothered.
*/
+ pr_info("Before irq_disable.\n");
local_irq_disable();
- printk("Will call new kernel at %08lx\n", image->start);
- printk("Bye ...\n");
+ pr_info("Will call new kernel at %08lx\n", image->start);
+ pr_info("Bye ...\n");
__flush_cache_all();
#ifdef CONFIG_SMP
/* All secondary cpus now may jump to kexec_wait cycle */
relocated_kexec_smp_wait = reboot_code_buffer +
- (void *)(kexec_smp_wait - relocate_new_kernel);
+ (void *)(kexec_smp_wait - kexec_relocate_new_kernel);
smp_wmb();
atomic_set(&kexec_ready_to_reboot, 1);
#endif
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.h
@@ -0,0 +1,20 @@
+#ifndef _MACHINE_KEXEC_H
+#define _MACHINE_KEXEC_H
+
+#ifndef __ASSEMBLY__
+extern const unsigned char kexec_relocate_new_kernel[];
+extern unsigned long kexec_relocate_new_kernel_end;
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
+extern char kexec_argv_buf[];
+extern char *kexec_argv[];
+
+#define KEXEC_RELOCATE_NEW_KERNEL_SIZE ((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel)
+#endif /* !__ASSEMBLY__ */
+
+#define KEXEC_COMMAND_LINE_SIZE 256
+#define KEXEC_ARGV_SIZE (KEXEC_COMMAND_LINE_SIZE / 16)
+#define KEXEC_MAX_ARGC (KEXEC_ARGV_SIZE / sizeof(long))
+
+#endif
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -12,8 +12,9 @@
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
+#include "machine_kexec.h"
-LEAF(relocate_new_kernel)
+LEAF(kexec_relocate_new_kernel)
PTR_L a0, arg0
PTR_L a1, arg1
PTR_L a2, arg2
@@ -98,7 +99,7 @@ done:
#endif
/* jump to kexec_start_address */
j s1
- END(relocate_new_kernel)
+ END(kexec_relocate_new_kernel)
#ifdef CONFIG_SMP
/*
@@ -184,9 +185,15 @@ kexec_indirection_page:
PTR 0
.size kexec_indirection_page, PTRSIZE
-relocate_new_kernel_end:
+kexec_argv_buf:
+ EXPORT(kexec_argv_buf)
+ .skip KEXEC_COMMAND_LINE_SIZE
+ .size kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE
+
+kexec_argv:
+ EXPORT(kexec_argv)
+ .skip KEXEC_ARGV_SIZE
+ .size kexec_argv, KEXEC_ARGV_SIZE
-relocate_new_kernel_size:
- EXPORT(relocate_new_kernel_size)
- PTR relocate_new_kernel_end - relocate_new_kernel
- .size relocate_new_kernel_size, PTRSIZE
+kexec_relocate_new_kernel_end:
+ EXPORT(kexec_relocate_new_kernel_end)
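For illustration only (not part of the patch): the code above looks for a kexec segment whose buffer starts with the literal prefix "kexec " and splits the remainder on spaces, the way a bootloader would. A stand-alone sketch of that tokenization, using a hypothetical command line and a 16-entry argv for brevity:

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 256	/* KEXEC_COMMAND_LINE_SIZE in the patch */

int main(void)
{
	/* hypothetical buffer staged by a kexec-tools-like loader */
	char buf[CMDLINE_SIZE] =
		"kexec console=ttyS0,115200 root=/dev/mtdblock2";
	char *argv[16];
	int argc = 0;
	char *p = buf;

	/* same space-splitting loop as machine_kexec_parse_argv() */
	while (p && *p && argc < 16) {
		if (*p == ' ') {
			*p++ = '\0';
			continue;
		}
		argv[argc++] = p;
		p = strchr(p, ' ');
	}

	for (int i = 0; i < argc; i++)
		printf("kexec_argv[%d] = %s\n", i, argv[i]);
	return 0;
}

Running it prints kexec_argv[0] = kexec followed by one entry per parameter, matching what machine_kexec_print_args() reports at boot.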

View file

@ -0,0 +1,146 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -12,6 +12,23 @@ menuconfig MTD
if MTD
+menu "OpenWrt specific MTD options"
+
+config MTD_ROOTFS_ROOT_DEV
+ bool "Automatically set 'rootfs' partition to be root filesystem"
+ default y
+
+config MTD_SPLIT_FIRMWARE
+ bool "Automatically split firmware partition for kernel+rootfs"
+ default y
+
+config MTD_SPLIT_FIRMWARE_NAME
+ string "Firmware partition name"
+ depends on MTD_SPLIT_FIRMWARE
+ default "firmware"
+
+endmenu
+
config MTD_TESTS
tristate "MTD tests support (DANGEROUS)"
depends on m
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,11 +29,13 @@
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/magic.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/kconfig.h>
#include "mtdcore.h"
+#include "mtdsplit/mtdsplit.h"
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -47,13 +49,14 @@ struct mtd_part {
struct list_head list;
};
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part);
+
/*
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
-
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
@@ -579,8 +582,10 @@ static int mtd_add_partition_attrs(struc
return ret;
}
-int mtd_add_partition(struct mtd_info *master, const char *name,
- long long offset, long long length)
+
+static int
+__mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length, bool dup_check)
{
struct mtd_partition part;
struct mtd_part *new;
@@ -612,6 +617,7 @@ int mtd_add_partition(struct mtd_info *m
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&new->mtd);
+ mtd_partition_split(master, new);
mtd_add_partition_attrs(new);
@@ -619,6 +625,12 @@ int mtd_add_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
+int mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length)
+{
+ return __mtd_add_partition(master, name, offset, length, true);
+}
+
int mtd_del_partition(struct mtd_info *master, int partno)
{
struct mtd_part *slave, *next;
@@ -644,6 +656,35 @@ int mtd_del_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
+#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
+#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
+#else
+#define SPLIT_FIRMWARE_NAME "unused"
+#endif
+
+static void split_firmware(struct mtd_info *master, struct mtd_part *part)
+{
+}
+
+void __weak arch_split_mtd_part(struct mtd_info *master, const char *name,
+ int offset, int size)
+{
+}
+
+static void mtd_partition_split(struct mtd_info *master, struct mtd_part *part)
+{
+ static int rootfs_found = 0;
+
+ if (rootfs_found)
+ return;
+
+ if (!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
+ config_enabled(CONFIG_MTD_SPLIT_FIRMWARE))
+ split_firmware(master, part);
+
+ arch_split_mtd_part(master, part->mtd.name, part->offset,
+ part->mtd.size);
+}
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -673,6 +714,7 @@ int add_mtd_partitions(struct mtd_info *
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&slave->mtd);
+ mtd_partition_split(master, slave);
mtd_add_partition_attrs(slave);
cur_offset = slave->offset + slave->mtd.size;
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -84,5 +84,7 @@ int mtd_add_partition(struct mtd_info *m
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+extern void __weak arch_split_mtd_part(struct mtd_info *master,
+ const char *name, int offset, int size);
#endif

View file

@ -0,0 +1,113 @@
From 02cff0ccaa6d364f5c1eeea83f47ac80ccc967d4 Mon Sep 17 00:00:00 2001
From: Gabor Juhos <juhosg@openwrt.org>
Date: Tue, 3 Sep 2013 18:11:50 +0200
Subject: [PATCH] mtd: add support for different partition parser types
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
drivers/mtd/mtdpart.c | 56 ++++++++++++++++++++++++++++++++++++++++
include/linux/mtd/partitions.h | 11 ++++++++
2 files changed, 67 insertions(+)
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -745,6 +745,30 @@ static struct mtd_part_parser *get_parti
#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+static struct mtd_part_parser *
+get_partition_parser_by_type(enum mtd_parser_type type,
+ struct mtd_part_parser *start)
+{
+ struct mtd_part_parser *p, *ret = NULL;
+
+ spin_lock(&part_parser_lock);
+
+ p = list_prepare_entry(start, &part_parsers, list);
+ if (start)
+ put_partition_parser(start);
+
+ list_for_each_entry_continue(p, &part_parsers, list) {
+ if (p->type == type && try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+ }
+
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
void register_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
@@ -862,6 +886,38 @@ int parse_mtd_partitions(struct mtd_info
return ret;
}
+int parse_mtd_partitions_by_type(struct mtd_info *master,
+ enum mtd_parser_type type,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_part_parser *prev = NULL;
+ int ret = 0;
+
+ while (1) {
+ struct mtd_part_parser *parser;
+
+ parser = get_partition_parser_by_type(type, prev);
+ if (!parser)
+ break;
+
+ ret = (*parser->parse_fn)(master, pparts, data);
+
+ if (ret > 0) {
+ put_partition_parser(parser);
+ printk(KERN_NOTICE
+ "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
+ break;
+ }
+
+ prev = parser;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
+
int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -68,12 +68,17 @@ struct mtd_part_parser_data {
* Functions dealing with the various ways of partitioning the space
*/
+enum mtd_parser_type {
+ MTD_PARSER_TYPE_DEVICE = 0,
+};
+
struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
struct mtd_part_parser_data *);
+ enum mtd_parser_type type;
};
extern void register_mtd_parser(struct mtd_part_parser *parser);
@@ -87,4 +92,9 @@ uint64_t mtd_get_device_size(const struc
extern void __weak arch_split_mtd_part(struct mtd_info *master,
const char *name, int offset, int size);
+int parse_mtd_partitions_by_type(struct mtd_info *master,
+ enum mtd_parser_type type,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
#endif

View file

@ -0,0 +1,72 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -656,6 +656,37 @@ int mtd_del_partition(struct mtd_info *m
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
+static int
+run_parsers_by_type(struct mtd_part *slave, enum mtd_parser_type type)
+{
+ struct mtd_partition *parts;
+ int nr_parts;
+ int i;
+
+ nr_parts = parse_mtd_partitions_by_type(&slave->mtd, type, &parts,
+ NULL);
+ if (nr_parts <= 0)
+ return nr_parts;
+
+ if (WARN_ON(!parts))
+ return 0;
+
+ for (i = 0; i < nr_parts; i++) {
+ /* adjust partition offsets */
+ parts[i].offset += slave->offset;
+
+ __mtd_add_partition(slave->master,
+ parts[i].name,
+ parts[i].offset,
+ parts[i].size,
+ false);
+ }
+
+ kfree(parts);
+
+ return nr_parts;
+}
+
#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
#else
@@ -664,6 +695,7 @@ EXPORT_SYMBOL_GPL(mtd_del_partition);
static void split_firmware(struct mtd_info *master, struct mtd_part *part)
{
+ run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
}
void __weak arch_split_mtd_part(struct mtd_info *master, const char *name,
@@ -678,6 +710,12 @@ static void mtd_partition_split(struct m
if (rootfs_found)
return;
+ if (!strcmp(part->mtd.name, "rootfs")) {
+ run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
+
+ rootfs_found = 1;
+ }
+
if (!strcmp(part->mtd.name, SPLIT_FIRMWARE_NAME) &&
config_enabled(CONFIG_MTD_SPLIT_FIRMWARE))
split_firmware(master, part);
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -70,6 +70,8 @@ struct mtd_part_parser_data {
enum mtd_parser_type {
MTD_PARSER_TYPE_DEVICE = 0,
+ MTD_PARSER_TYPE_ROOTFS,
+ MTD_PARSER_TYPE_FIRMWARE,
};
struct mtd_part_parser {

View file

@ -0,0 +1,22 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -27,6 +27,8 @@ config MTD_SPLIT_FIRMWARE_NAME
depends on MTD_SPLIT_FIRMWARE
default "firmware"
+source "drivers/mtd/mtdsplit/Kconfig"
+
endmenu
config MTD_TESTS
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
+obj-$(CONFIG_MTD_SPLIT) += mtdsplit/
+
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o

View file

@ -0,0 +1,101 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -453,14 +453,12 @@ static struct mtd_part *allocate_partiti
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
- slave->offset = cur_offset;
- if (mtd_mod_by_eb(cur_offset, master) != 0) {
- /* Round up to next erasesize */
- slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ /* Round up to next erasesize */
+ slave->offset = mtd_roundup_to_eb(cur_offset, master);
+ if (slave->offset != cur_offset)
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
- }
}
if (slave->offset == MTDPART_OFS_RETAIN) {
slave->offset = cur_offset;
@@ -687,6 +685,17 @@ run_parsers_by_type(struct mtd_part *sla
return nr_parts;
}
+static inline unsigned long
+mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
+{
+ unsigned long mask = mtd->erasesize - 1;
+
+ len += offset & mask;
+ len = (len + mask) & ~mask;
+ len -= offset & mask;
+ return len;
+}
+
#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
#else
@@ -973,6 +982,24 @@ int mtd_is_partition(const struct mtd_in
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return (struct mtd_info *)mtd;
+
+ return PART(mtd)->master;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_master);
+
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return 0;
+
+ return PART(mtd)->offset;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_offset);
+
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -90,6 +90,8 @@ int mtd_is_partition(const struct mtd_in
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd);
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
extern void __weak arch_split_mtd_part(struct mtd_info *master,
const char *name, int offset, int size);
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -334,6 +334,24 @@ static inline uint32_t mtd_mod_by_eb(uin
return do_div(sz, mtd->erasesize);
}
+static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd_mod_by_eb(sz, mtd) == 0)
+ return sz;
+
+ /* Round up to next erase block */
+ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize;
+}
+
+static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd_mod_by_eb(sz, mtd) == 0)
+ return sz;
+
+ /* Round down to the start of the current erase block */
+ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize;
+}
+
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)

View file

@ -0,0 +1,70 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -27,6 +27,11 @@ config MTD_SPLIT_FIRMWARE_NAME
depends on MTD_SPLIT_FIRMWARE
default "firmware"
+config MTD_UIMAGE_SPLIT
+ bool "Enable split support for firmware partitions containing a uImage"
+ depends on MTD_SPLIT_FIRMWARE
+ default y
+
source "drivers/mtd/mtdsplit/Kconfig"
endmenu
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -696,6 +696,37 @@ mtd_pad_erasesize(struct mtd_info *mtd,
return len;
}
+#define UBOOT_MAGIC 0x27051956
+
+static void split_uimage(struct mtd_info *master, struct mtd_part *part)
+{
+ struct {
+ __be32 magic;
+ __be32 pad[2];
+ __be32 size;
+ } hdr;
+ size_t len;
+
+ if (mtd_read(master, part->offset, sizeof(hdr), &len, (void *) &hdr))
+ return;
+
+ if (len != sizeof(hdr) || hdr.magic != cpu_to_be32(UBOOT_MAGIC))
+ return;
+
+ len = be32_to_cpu(hdr.size) + 0x40;
+ len = mtd_pad_erasesize(master, part->offset, len);
+ if (len + master->erasesize > part->mtd.size)
+ return;
+
+ if (config_enabled(CONFIG_MTD_SPLIT_UIMAGE_FW))
+ pr_err("Dedicated partitioner didn't split firmware partition, please fill a bug report!\n");
+ else
+ pr_warn("Support for built-in firmware splitter will be removed, please use CONFIG_MTD_SPLIT_UIMAGE_FW\n");
+
+ __mtd_add_partition(master, "rootfs", part->offset + len,
+ part->mtd.size - len, false);
+}
+
#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
#else
@@ -704,7 +735,14 @@ mtd_pad_erasesize(struct mtd_info *mtd,
static void split_firmware(struct mtd_info *master, struct mtd_part *part)
{
- run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
+ int ret;
+
+ ret = run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
+ if (ret > 0)
+ return;
+
+ if (config_enabled(CONFIG_MTD_UIMAGE_SPLIT))
+ split_uimage(master, part);
}
void __weak arch_split_mtd_part(struct mtd_info *master, const char *name,

View file

@ -0,0 +1,18 @@
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -35,6 +35,7 @@
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
+struct mtd_info;
struct mtd_partition {
const char *name; /* identifier string */
@@ -50,7 +51,6 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
-struct mtd_info;
struct device_node;
/**

View file

@ -0,0 +1,142 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -37,6 +37,8 @@
#include "mtdcore.h"
#include "mtdsplit/mtdsplit.h"
+#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -235,13 +237,61 @@ static int part_erase(struct mtd_info *m
struct mtd_part *part = PART(mtd);
int ret;
+
+ instr->partial_start = false;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ size_t readlen = 0;
+ u64 mtd_ofs;
+
+ instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
+ if (!instr->erase_buf)
+ return -ENOMEM;
+
+ mtd_ofs = part->offset + instr->addr;
+ instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->addr -= instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ instr->addr + part->offset,
+ part->master->erasesize,
+ &readlen, instr->erase_buf);
+
+ instr->len += instr->erase_buf_ofs;
+ instr->partial_start = true;
+ } else {
+ mtd_ofs = part->offset + part->mtd.size;
+ instr->erase_buf_ofs = part->master->erasesize -
+ do_div(mtd_ofs, part->master->erasesize);
+
+ if (instr->erase_buf_ofs > 0) {
+ instr->len += instr->erase_buf_ofs;
+ ret = mtd_read(part->master,
+ part->offset + instr->addr +
+ instr->len - part->master->erasesize,
+ part->master->erasesize, &readlen,
+ instr->erase_buf);
+ } else {
+ ret = 0;
+ }
+ }
+ if (ret < 0) {
+ kfree(instr->erase_buf);
+ return ret;
+ }
+
+ }
+
instr->addr += part->offset;
ret = part->master->_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL)
+ kfree(instr->erase_buf);
}
+
return ret;
}
@@ -249,7 +299,25 @@ void mtd_erase_callback(struct erase_inf
{
if (instr->mtd->_erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
+ size_t wrlen = 0;
+ if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
+ if (instr->partial_start) {
+ part->master->_write(part->master,
+ instr->addr, instr->erase_buf_ofs,
+ &wrlen, instr->erase_buf);
+ instr->addr += instr->erase_buf_ofs;
+ } else {
+ instr->len -= instr->erase_buf_ofs;
+ part->master->_write(part->master,
+ instr->addr + instr->len,
+ instr->erase_buf_ofs, &wrlen,
+ instr->erase_buf +
+ part->master->erasesize -
+ instr->erase_buf_ofs);
+ }
+ kfree(instr->erase_buf);
+ }
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
@@ -522,17 +590,20 @@ static struct mtd_part *allocate_partiti
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, &slave->mtd)) {
/* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of
- * _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- part->name);
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+ if (((u32) slave->mtd.size) > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- part->name);
+ mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
+ slave->mtd.flags |= MTD_ERASE_PARTIAL;
+
+ if ((u32) slave->mtd.size > master->erasesize)
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ else
+ slave->mtd.erasesize = slave->mtd.size;
}
slave->mtd.ecclayout = master->ecclayout;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -55,6 +55,10 @@ struct erase_info {
u_long priv;
u_char state;
struct erase_info *next;
+
+ u8 *erase_buf;
+ u32 erase_buf_ofs;
+ bool partial_start;
};
struct mtd_erase_region_info {

View file

@ -0,0 +1,18 @@
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -336,7 +336,14 @@ static int part_lock(struct mtd_info *mt
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->_unlock(part->master, ofs + part->offset, len);
+
+ ofs += part->offset;
+ if (mtd->flags & MTD_ERASE_PARTIAL) {
+ /* round up len to next erasesize and round down offset to prev block */
+ len = (mtd_div_by_eb(len, part->master) + 1) * part->master->erasesize;
+ ofs &= ~(part->master->erasesize - 1);
+ }
+ return part->master->_unlock(part->master, ofs, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)

View file

@ -0,0 +1,30 @@
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -265,14 +265,21 @@ static int parse_redboot_partitions(stru
#endif
names += strlen(names)+1;
-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
- i++;
- parts[i].offset = parts[i-1].size + parts[i-1].offset;
- parts[i].size = fl->next->img->flash_base - parts[i].offset;
- parts[i].name = nullname;
- }
+ if (!strcmp(parts[i].name, "rootfs")) {
+ parts[i].size = fl->next->img->flash_base;
+ parts[i].size &= ~(master->erasesize - 1);
+ parts[i].size -= parts[i].offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ nrparts--;
+ } else {
+ i++;
+ parts[i].offset = parts[i-1].size + parts[i-1].offset;
+ parts[i].size = fl->next->img->flash_base - parts[i].offset;
+ parts[i].name = nullname;
#endif
+ }
+ }
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);

View file

@ -0,0 +1,35 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -179,6 +179,22 @@ config MTD_BCM47XX_PARTS
This provides partitions parser for devices based on BCM47xx
boards.
+config MTD_MYLOADER_PARTS
+ tristate "MyLoader partition parsing"
+ depends on ADM5120 || ATH25 || ATH79
+ ---help---
+ MyLoader is a bootloader which allows the user to define partitions
+ in flash devices, by putting a table in the second erase block
+ on the device, similar to a partition table. This table gives the
+ offsets and lengths of the user defined partitions.
+
+ If you need code which can detect and parse these tables, and
+ register MTD 'partitions' corresponding to each image detected,
+ enable this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically.
+
comment "User Modules And Translation Layers"
#
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
+obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o

View file

@ -0,0 +1,34 @@
From 841e59ba3e496d86ca5f069204d5e5c1ad43c01d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
Date: Tue, 27 Jan 2015 22:29:21 +0100
Subject: [PATCH] mtd: bcm47xxpart: support for Xiaomi specific board_data
partition
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
---
drivers/mtd/bcm47xxpart.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -33,6 +33,7 @@
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
#define BOARD_DATA_MAGIC2 0xBD0D0BBD
+#define BOARD_DATA_XIAOMI_MAGIC 0x474D4442 /* GMDB */
#define CFE_MAGIC 0x43464531 /* 1EFC */
#define FACTORY_MAGIC 0x59544346 /* FCTY */
#define NVRAM_HEADER 0x48534C46 /* FLSH */
@@ -262,7 +263,8 @@ static int bcm47xxpart_parse(struct mtd_
}
/* Some devices (ex. WNDR3700v3) don't have a standard 'MPFR' */
- if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
+ if (buf[0x000 / 4] == BOARD_DATA_MAGIC2 ||
+ le32_to_cpu(buf[0x000 / 4]) == BOARD_DATA_XIAOMI_MAGIC) {
bcm47xxpart_add_part(&parts[curr_part++], "board_data",
offset, MTD_WRITEABLE);
continue;

View file

@ -0,0 +1,42 @@
From fd54aa583296f9adfb1f519affbc10ba521eb809 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
Date: Wed, 28 Jan 2015 22:14:41 +0100
Subject: [PATCH] mtd: bcm47xxpart: detect T_Meter partition
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
It can be found on many Netgear devices. It consists of many 0x30-byte
blocks, each starting with the bytes 4D 54 ("MT").
Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
---
drivers/mtd/bcm47xxpart.c | 10 ++++++++++
1 file changed, 10 insertions(+)
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -39,6 +39,7 @@
#define NVRAM_HEADER 0x48534C46 /* FLSH */
#define POT_MAGIC1 0x54544f50 /* POTT */
#define POT_MAGIC2 0x504f /* OP */
+#define T_METER_MAGIC 0x4D540000 /* MT */
#define ML_MAGIC1 0x39685a42
#define ML_MAGIC2 0x26594131
#define TRX_MAGIC 0x30524448
@@ -176,6 +177,15 @@ static int bcm47xxpart_parse(struct mtd_
MTD_WRITEABLE);
continue;
}
+
+ /* T_Meter */
+ if ((le32_to_cpu(buf[0x000 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
+ (le32_to_cpu(buf[0x030 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
+ (le32_to_cpu(buf[0x060 / 4]) & 0xFFFF0000) == T_METER_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "T_Meter", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
/* TRX */
if (buf[0x000 / 4] == TRX_MAGIC) {

View file

@ -0,0 +1,108 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
@@ -219,7 +220,7 @@ static void block2mtd_free_device(struct
static struct block2mtd_dev *add_device(char *devname, int erase_size,
- int timeout)
+ const char *mtdname, int timeout)
{
#ifndef MODULE
int i;
@@ -227,6 +228,7 @@ static struct block2mtd_dev *add_device(
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
struct block_device *bdev = ERR_PTR(-ENODEV);
struct block2mtd_dev *dev;
+ struct mtd_partition *part;
char *name;
if (!devname)
@@ -283,13 +285,16 @@ static struct block2mtd_dev *add_device(
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
+ if (!mtdname)
+ mtdname = devname;
+ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
if (!name)
goto err_destroy_mutex;
+ strcpy(name, mtdname);
dev->mtd.name = name;
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
@@ -302,7 +307,11 @@ static struct block2mtd_dev *add_device(
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
- if (mtd_device_register(&dev->mtd, NULL, 0)) {
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = name;
+ part->offset = 0;
+ part->size = dev->mtd.size;
+ if (mtd_device_register(&dev->mtd, part, 1)) {
/* Device didn't get added, so free the entry */
goto err_destroy_mutex;
}
@@ -310,8 +319,7 @@ static struct block2mtd_dev *add_device(
list_add(&dev->list, &blkmtd_device_list);
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
dev->mtd.index,
- dev->mtd.name + strlen("block2mtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
err_destroy_mutex:
@@ -384,7 +392,7 @@ static int block2mtd_setup2(const char *
/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
char buf[80 + 12 + 80 + 8];
char *str = buf;
- char *token[2];
+ char *token[3];
char *name;
size_t erase_size = PAGE_SIZE;
unsigned long timeout = MTD_DEFAULT_TIMEOUT;
@@ -398,7 +406,7 @@ static int block2mtd_setup2(const char *
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str) {
@@ -424,8 +432,10 @@ static int block2mtd_setup2(const char *
return 0;
}
}
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ pr_err("mtd device name too long\n");
- add_device(name, erase_size, timeout);
+ add_device(name, erase_size, token[2], timeout);
return 0;
}
@@ -459,7 +469,7 @@ static int block2mtd_setup(const char *v
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
static int __init block2mtd_init(void)
{

View file

@ -0,0 +1,39 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -392,7 +392,7 @@ static int block2mtd_setup2(const char *
/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
char buf[80 + 12 + 80 + 8];
char *str = buf;
- char *token[3];
+ char *token[4];
char *name;
size_t erase_size = PAGE_SIZE;
unsigned long timeout = MTD_DEFAULT_TIMEOUT;
@@ -406,7 +406,7 @@ static int block2mtd_setup2(const char *
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 3; i++)
+ for (i = 0; i < 4; i++)
token[i] = strsep(&str, ",");
if (str) {
@@ -435,6 +435,9 @@ static int block2mtd_setup2(const char *
if (token[2] && (strlen(token[2]) + 1 > 80))
pr_err("mtd device name too long\n");
+ if (token[3] && kstrtoul(token[3], 0, &timeout))
+ pr_err("invalid timeout\n");
+
add_device(name, erase_size, token[2], timeout);
return 0;
@@ -469,7 +472,7 @@ static int block2mtd_setup(const char *v
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]]\"");
static int __init block2mtd_init(void)
{

View file

@ -0,0 +1,37 @@
---
drivers/mtd/nand/plat_nand.c | 13 ++++++++++++-
include/linux/mtd/nand.h | 1 +
2 files changed, 13 insertions(+), 1 deletion(-)
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -869,6 +869,7 @@ struct platform_nand_chip {
unsigned int options;
unsigned int bbt_options;
const char **part_probe_types;
+ int (*chip_fixup)(struct mtd_info *mtd);
};
/* Keep gcc happy */
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -88,7 +88,18 @@ static int plat_nand_probe(struct platfo
}
/* Scan to find existence of the device */
- if (nand_scan(&data->mtd, pdata->chip.nr_chips)) {
+ if (nand_scan_ident(&data->mtd, pdata->chip.nr_chips, NULL)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (pdata->chip.chip_fixup) {
+ err = pdata->chip.chip_fixup(&data->mtd);
+ if (err)
+ goto out;
+ }
+
+ if (nand_scan_tail(&data->mtd)) {
err = -ENXIO;
goto out;
}

View file

@ -0,0 +1,11 @@
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -507,7 +507,7 @@ int __nand_correct_data(unsigned char *b
return 1; /* error in ECC data; no action needed */
pr_err("%s: uncorrectable ECC error\n", __func__);
- return -1;
+ return -EBADMSG;
}
EXPORT_SYMBOL(__nand_correct_data);

View file

@ -0,0 +1,11 @@
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -809,7 +809,7 @@ static int get_chip(struct map_info *map
return 0;
case FL_ERASING:
- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;

View file

@ -0,0 +1,18 @@
From: George Kashperko <george@znau.edu.ua>
Issue map read after Write Buffer Load command to ensure chip is ready
to receive data.
Signed-off-by: George Kashperko <george@znau.edu.ua>
---
drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1830,6 +1830,7 @@ static int __xipram do_write_buffer(stru
/* Write Buffer Load */
map_write(map, CMD(0x25), cmd_adr);
+ (void) map_read(map, cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;

View file

@ -0,0 +1,22 @@
From 34e2b403040a2f9d3ba071d95a7f42457e2950f9 Mon Sep 17 00:00:00 2001
From: Gabor Juhos <juhosg@openwrt.org>
Date: Tue, 7 Apr 2015 18:35:15 +0200
Subject: [PATCH] mtd: spi-nor: add support for the ISSI SI25CD512 SPI flash
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
drivers/mtd/spi-nor/spi-nor.c | 3 +++
1 file changed, 3 insertions(+)
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -566,6 +566,9 @@ static const struct flash_info spi_nor_i
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },

View file

@ -0,0 +1,26 @@
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/kconfig.h>
+#include <linux/root_dev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -456,6 +457,15 @@ int add_mtd_device(struct mtd_info *mtd)
of this try_ nonsense, and no bitching about it
either. :) */
__module_get(THIS_MODULE);
+
+ if (!strcmp(mtd->name, "rootfs") &&
+ config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+ ROOT_DEV == 0) {
+ pr_notice("mtd: device %d (%s) set to be root filesystem\n",
+ mtd->index, mtd->name);
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ }
+
return 0;
fail_added:

View file

@ -0,0 +1,76 @@
From 8a52e4100d7c3a4a1dfddfa02b8864a9b0068c13 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Sat, 17 May 2014 03:36:18 +0200
Subject: [PATCH 1/5] ubi: auto-attach mtd device named "ubi" or "data" on boot
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/mtd/ubi/build.c | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1200,6 +1200,49 @@ static struct mtd_info * __init open_mtd
return mtd;
}
+/*
+ * This function tries attaching mtd partitions named either "ubi" or "data"
+ * during boot.
+ */
+static void __init ubi_auto_attach(void)
+{
+ int err;
+ struct mtd_info *mtd;
+
+ /* try attaching mtd device named "ubi" or "data" */
+ mtd = open_mtd_device("ubi");
+ if (IS_ERR(mtd))
+ mtd = open_mtd_device("data");
+
+ if (!IS_ERR(mtd)) {
+ size_t len;
+ char magic[4];
+
+ /* check for a valid ubi magic */
+ err = mtd_read(mtd, 0, 4, &len, (void *) magic);
+ if (!err && len == 4 && strncmp(magic, "UBI#", 4)) {
+ pr_err("UBI error: no valid UBI magic found inside mtd%d", mtd->index);
+ put_mtd_device(mtd);
+ return;
+ }
+
+ /* auto-add only media types where UBI makes sense */
+ if (mtd->type == MTD_NANDFLASH ||
+ mtd->type == MTD_NORFLASH ||
+ mtd->type == MTD_DATAFLASH ||
+ mtd->type == MTD_MLCNANDFLASH) {
+ mutex_lock(&ubi_devices_mutex);
+ pr_notice("UBI: auto-attach mtd%d", mtd->index);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ pr_err("UBI error: cannot attach mtd%d", mtd->index);
+ put_mtd_device(mtd);
+ }
+ }
+ }
+}
+
static int __init ubi_init(void)
{
int err, i, k;
@@ -1283,6 +1326,12 @@ static int __init ubi_init(void)
}
}
+ /* auto-attach mtd devices only if built-in to the kernel and no ubi.mtd
+ * parameter was given */
+ if (config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+ !ubi_is_module() && !mtd_devs)
+ ubi_auto_attach();
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d", err);

View file

@ -0,0 +1,69 @@
From 0f3966579815f889bb2fcb4846152c35f65e79c4 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 15 May 2014 21:06:33 +0200
Subject: [PATCH 2/5] ubi: auto-create ubiblock device for rootfs
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/mtd/ubi/block.c | 42 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -628,6 +628,44 @@ static void __init ubiblock_create_from_
}
}
+#define UBIFS_NODE_MAGIC 0x06101831
+static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc)
+{
+ int ret;
+ uint32_t magic_of, magic;
+ ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4);
+ if (ret)
+ return 0;
+ magic = le32_to_cpu(magic_of);
+ return magic == UBIFS_NODE_MAGIC;
+}
+
+static void __init ubiblock_create_auto_rootfs(void)
+{
+ int ubi_num, ret, is_ubifs;
+ struct ubi_volume_desc *desc;
+ struct ubi_volume_info vi;
+
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
+ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
+ if (IS_ERR(desc))
+ continue;
+
+ ubi_get_volume_info(desc, &vi);
+ is_ubifs = ubi_vol_is_ubifs(desc);
+ ubi_close_volume(desc);
+ if (is_ubifs)
+ break;
+
+ ret = ubiblock_create(&vi);
+ if (ret)
+ pr_err("UBI error: block: can't add '%s' volume, err=%d\n",
+ vi.name, ret);
+ /* always break if we get here */
+ break;
+ }
+}
+
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -658,6 +696,10 @@ int __init ubiblock_init(void)
*/
ubiblock_create_from_param();
+ /* auto-attach "rootfs" volume if existing and non-ubifs */
+ if (config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV))
+ ubiblock_create_auto_rootfs();
+
/*
* Block devices are only created upon user requests, so we ignore
* existing volumes.

View file

@ -0,0 +1,53 @@
From eea9e1785e4c05c2a3444506aabafa0ae958538f Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Sat, 17 May 2014 03:35:02 +0200
Subject: [PATCH 4/5] try auto-mounting ubi0:rootfs in init/do_mounts.c
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
init/do_mounts.c | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -438,7 +438,27 @@ retry:
out:
put_page(page);
}
-
+
+static int __init mount_ubi_rootfs(void)
+{
+ int flags = MS_SILENT;
+ int err, tried = 0;
+
+ while (tried < 2) {
+ err = do_mount_root("ubi0:rootfs", "ubifs", flags, \
+ root_mount_data);
+ switch (err) {
+ case -EACCES:
+ flags |= MS_RDONLY;
+ tried++;
+ default:
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+
#ifdef CONFIG_ROOT_NFS
#define NFSROOT_TIMEOUT_MIN 5
@@ -532,6 +552,10 @@ void __init mount_root(void)
change_floppy("root floppy");
}
#endif
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+ if (!mount_ubi_rootfs())
+ return;
+#endif
#ifdef CONFIG_BLOCK
{
int err = create_dev("/dev/root", ROOT_DEV);

View file

@ -0,0 +1,37 @@
From cd68d1b12b5ea4c01a664c064179ada42bf55d3d Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 15 May 2014 20:55:42 +0200
Subject: [PATCH 5/5] ubi: set ROOT_DEV to ubiblock "rootfs" if unset
To: openwrt-devel@lists.openwrt.org
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/mtd/ubi/block.c | 10 ++++++++++
1 file changed, 10 insertions(+)
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -50,6 +50,7 @@
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>
+#include <linux/root_dev.h>
#include "ubi-media.h"
#include "ubi.h"
@@ -448,6 +449,15 @@ int ubiblock_create(struct ubi_volume_in
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
+
+ if (!strcmp(vi->name, "rootfs") &&
+ config_enabled(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+ ROOT_DEV == 0) {
+ pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n",
+ dev->ubi_num, dev->vol_id, vi->name);
+ ROOT_DEV = MKDEV(gd->major, gd->first_minor);
+ }
+
return 0;
out_free_queue:

View file

@ -0,0 +1,51 @@
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -803,6 +803,13 @@ out_unlock:
return err;
}
+static bool ec_hdr_has_eof(struct ubi_ec_hdr *ech)
+{
+ return ech->padding1[0] == 'E' &&
+ ech->padding1[1] == 'O' &&
+ ech->padding1[2] == 'F';
+}
+
/**
* scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
@@ -833,9 +840,21 @@ static int scan_peb(struct ubi_device *u
return 0;
}
- err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
- if (err < 0)
- return err;
+ if (!ai->eof_found) {
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err < 0)
+ return err;
+
+ if (ec_hdr_has_eof(ech)) {
+ pr_notice("UBI: EOF marker found, PEBs from %d will be erased",
+ pnum);
+ ai->eof_found = true;
+ }
+ }
+
+ if (ai->eof_found)
+ err = UBI_IO_FF_BITFLIPS;
+
switch (err) {
case 0:
break;
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -739,6 +739,7 @@ struct ubi_attach_info {
int mean_ec;
uint64_t ec_sum;
int ec_count;
+ bool eof_found;
struct kmem_cache *aeb_slab_cache;
};

View file

@ -0,0 +1,18 @@
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -30,6 +30,7 @@ source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
source "fs/f2fs/Kconfig"
+source "fs/yaffs2/Kconfig"
config FS_DAX
bool "Direct Access (DAX) support"
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -125,3 +125,5 @@ obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
+

View file

@ -0,0 +1,239 @@
Subject: yaffs: fix compat tags handling
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
--- a/fs/yaffs2/yaffs_tagscompat.c
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -17,7 +17,9 @@
#include "yaffs_getblockinfo.h"
#include "yaffs_trace.h"
+#if 0
static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+#endif
/********** Tags ECC calculations *********/
@@ -71,6 +73,7 @@ int yaffs_check_tags_ecc(struct yaffs_ta
return 0;
}
+#if 0
/********** Tags **********/
static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
@@ -379,3 +382,214 @@ void yaffs_tags_compat_install(struct ya
if(!dev->tagger.mark_bad_fn)
dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
}
+#else
+
+#include "yaffs_packedtags1.h"
+
+static int yaffs_tags_compat_write(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags1 pt1;
+ u8 tag_buf[9];
+ int retval;
+
+ /* we assume that yaffs_packed_tags1 and yaffs_tags are compatible */
+ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
+ compile_time_assertion(sizeof(struct yaffs_tags) == 8);
+
+ yaffs_pack_tags1(&pt1, tags);
+ yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+ * tags, one with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+ if (!dev->param.tags_9bytes) {
+ if (tags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+ memcpy(tag_buf, &pt1, 8);
+ } else {
+ if (tags->is_deleted) {
+ memset(tag_buf, 0xff, 8);
+ tag_buf[8] = 0;
+ } else {
+ memcpy(tag_buf, &pt1, 8);
+ tag_buf[8] = 0xff;
+ }
+ }
+
+ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data,
+ (data) ? dev->data_bytes_per_chunk : 0,
+ tag_buf,
+ (dev->param.tags_9bytes) ? 9 : 8);
+
+ return retval;
+}
+
+/* Return with empty extended tags but add ecc_result.
+ */
+static int return_empty_tags(struct yaffs_ext_tags *tags,
+ enum yaffs_ecc_result ecc_result,
+ int retval)
+{
+ if (tags) {
+ memset(tags, 0, sizeof(*tags));
+ tags->ecc_result = ecc_result;
+ }
+
+ return retval;
+}
+
+static int yaffs_tags_compat_read(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags1 pt1;
+ enum yaffs_ecc_result ecc_result;
+ int retval;
+ int deleted;
+ u8 tag_buf[9];
+
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ tag_buf,
+ (dev->param.tags_9bytes) ? 9 : 8,
+ &ecc_result);
+
+ switch (ecc_result) {
+ case YAFFS_ECC_RESULT_NO_ERROR:
+ case YAFFS_ECC_RESULT_FIXED:
+ break;
+
+ case YAFFS_ECC_RESULT_UNFIXED:
+ default:
+ return_empty_tags(tags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ tags->block_bad = dev->drv.drv_check_bad_fn(dev, nand_chunk);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk. */
+ if (yaffs_check_ff(tag_buf, 8)) {
+ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return return_empty_tags(tags, YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_OK);
+ }
+
+ memcpy(&pt1, tag_buf, 8);
+
+ if (!dev->param.tags_9bytes) {
+ /* Read deleted status (bit) then return it to its non-deleted
+ * state before performing tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+ } else {
+ deleted = (hweight8(tag_buf[8]) < 7) ? 1 : 0;
+ }
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible. */
+ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->n_tags_ecc_fixed++;
+ if (ecc_result == YAFFS_ECC_RESULT_NO_ERROR)
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->n_tags_ecc_unfixed++;
+ return return_empty_tags(tags, YAFFS_ECC_RESULT_UNFIXED,
+ YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.should_be_ff = 0xffffffff;
+ yaffs_unpack_tags1(tags, &pt1);
+ tags->ecc_result = ecc_result;
+
+ /* Set deleted state */
+ tags->is_deleted = deleted;
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ return dev->drv.drv_mark_bad_fn(dev, block_no);
+}
+
+static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ struct yaffs_ext_tags tags;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "%s %d", __func__, block_no);
+
+ *seq_number = 0;
+
+ retval = dev->drv.drv_check_bad_fn(dev, block_no);
+ if (retval == YAFFS_FAIL) {
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ goto out;
+ }
+
+ yaffs_tags_compat_read(dev, block_no * dev->param.chunks_per_block,
+ NULL, &tags);
+
+ if (tags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block %d is marked bad",
+ block_no);
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else if (tags.chunk_used) {
+ *seq_number = tags.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else {
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ retval = YAFFS_OK;
+
+out:
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block query returns seq %u state %d",
+ *seq_number, *state);
+
+ return retval;
+}
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev)
+{
+ if (dev->param.is_yaffs2)
+ return;
+
+ if (!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_write;
+
+ if (!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_read;
+
+ if (!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
+
+ if (!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
+}
+#endif

View file

@ -0,0 +1,115 @@
Subject: yaffs: add support for tags-9bytes mount option
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -2644,6 +2644,7 @@ static const struct super_operations yaf
struct yaffs_options {
int inband_tags;
+ int tags_9bytes;
int skip_checkpoint_read;
int skip_checkpoint_write;
int no_cache;
@@ -2683,6 +2684,8 @@ static int yaffs_parse_options(struct ya
if (!strcmp(cur_opt, "inband-tags")) {
options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-9bytes")) {
+ options->tags_9bytes = 1;
} else if (!strcmp(cur_opt, "tags-ecc-off")) {
options->tags_ecc_on = 0;
options->tags_ecc_overridden = 1;
@@ -2756,7 +2759,6 @@ static struct super_block *yaffs_interna
struct yaffs_param *param;
int read_only = 0;
- int inband_tags = 0;
struct yaffs_options options;
@@ -2796,6 +2798,9 @@ static struct super_block *yaffs_interna
memset(&options, 0, sizeof(options));
+ if (IS_ENABLED(CONFIG_YAFFS_9BYTE_TAGS))
+ options.tags_9bytes = 1;
+
if (yaffs_parse_options(&options, data_str)) {
/* Option parsing failed */
return NULL;
@@ -2829,17 +2834,22 @@ static struct super_block *yaffs_interna
}
/* Added NCB 26/5/2006 for completeness */
- if (yaffs_version == 2 && !options.inband_tags
- && WRITE_SIZE(mtd) == 512) {
+ if (yaffs_version == 2 &&
+ (!options.inband_tags || options.tags_9bytes) &&
+ WRITE_SIZE(mtd) == 512) {
yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
yaffs_version = 1;
}
- if (mtd->oobavail < sizeof(struct yaffs_packed_tags2) ||
- options.inband_tags)
- inband_tags = 1;
+ if (yaffs_version == 2 &&
+ mtd->oobavail < sizeof(struct yaffs_packed_tags2)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting inband tags");
+ options.inband_tags = 1;
+ }
- if(yaffs_verify_mtd(mtd, yaffs_version, inband_tags) < 0)
+ err = yaffs_verify_mtd(mtd, yaffs_version, options.inband_tags,
+ options.tags_9bytes);
+ if (err < 0)
return NULL;
/* OK, so if we got here, we have an MTD that's NAND and looks
@@ -2896,7 +2906,8 @@ static struct super_block *yaffs_interna
param->n_reserved_blocks = 5;
param->n_caches = (options.no_cache) ? 0 : 10;
- param->inband_tags = inband_tags;
+ param->inband_tags = options.inband_tags;
+ param->tags_9bytes = options.tags_9bytes;
param->enable_xattr = 1;
if (options.lazy_loading_overridden)
--- a/fs/yaffs2/yaffs_mtdif.c
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -278,7 +278,8 @@ struct mtd_info * yaffs_get_mtd_device(d
return mtd;
}
-int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags,
+ int tags_9bytes)
{
if (yaffs_version == 2) {
if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
@@ -297,6 +298,12 @@ int yaffs_verify_mtd(struct mtd_info *mt
);
return -1;
}
+
+ if (tags_9bytes && mtd->oobavail < 9) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device does not support 9-byte tags");
+ return -1;
+ }
}
return 0;
--- a/fs/yaffs2/yaffs_mtdif.h
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -21,5 +21,6 @@
void yaffs_mtd_drv_install(struct yaffs_dev *dev);
struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
void yaffs_put_mtd_device(struct mtd_info *mtd);
-int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags,
+ int tags_9bytes);
#endif

View file

@ -0,0 +1,29 @@
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -774,7 +774,25 @@ static int yaffs_sync_object(struct file
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static const struct file_operations yaffs_file_operations = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+ .read = new_sync_read,
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) */
+ .read_iter = generic_file_read_iter,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+ .write = new_sync_write,
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) */
+ .write_iter = generic_file_write_iter,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
static const struct file_operations yaffs_file_operations = {
.read = do_sync_read,
.write = do_sync_write,

View file

@ -0,0 +1,95 @@
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -283,7 +283,7 @@ static int yaffs_readpage_nolock(struct
(long long)pos,
(unsigned)PAGE_CACHE_SIZE);
- obj = yaffs_dentry_to_obj(f->f_dentry);
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
dev = obj->my_dev;
@@ -481,7 +481,7 @@ static ssize_t yaffs_hold_space(struct f
int n_free_chunks;
- obj = yaffs_dentry_to_obj(f->f_dentry);
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
dev = obj->my_dev;
@@ -499,7 +499,7 @@ static void yaffs_release_space(struct f
struct yaffs_obj *obj;
struct yaffs_dev *dev;
- obj = yaffs_dentry_to_obj(f->f_dentry);
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
dev = obj->my_dev;
@@ -591,7 +591,7 @@ static ssize_t yaffs_file_write(struct f
struct inode *inode;
struct yaffs_dev *dev;
- obj = yaffs_dentry_to_obj(f->f_dentry);
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
if (!obj) {
yaffs_trace(YAFFS_TRACE_OS,
@@ -603,7 +603,7 @@ static ssize_t yaffs_file_write(struct f
yaffs_gross_lock(dev);
- inode = f->f_dentry->d_inode;
+ inode = f->f_path.dentry->d_inode;
if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
ipos = inode->i_size;
@@ -727,7 +727,7 @@ static int yaffs_file_flush(struct file
static int yaffs_file_flush(struct file *file)
#endif
{
- struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_path.dentry);
struct yaffs_dev *dev = obj->my_dev;
@@ -1734,7 +1734,7 @@ static int yaffs_iterate(struct file *f,
char name[YAFFS_MAX_NAME_LENGTH + 1];
- obj = yaffs_dentry_to_obj(f->f_dentry);
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
dev = obj->my_dev;
yaffs_gross_lock(dev);
@@ -1798,14 +1798,14 @@ static int yaffs_readdir(struct file *f,
struct yaffs_obj *obj;
struct yaffs_dev *dev;
struct yaffs_search_context *sc;
- struct inode *inode = f->f_dentry->d_inode;
+ struct inode *inode = f->f_path.dentry->d_inode;
unsigned long offset, curoffs;
struct yaffs_obj *l;
int ret_val = 0;
char name[YAFFS_MAX_NAME_LENGTH + 1];
- obj = yaffs_dentry_to_obj(f->f_dentry);
+ obj = yaffs_dentry_to_obj(f->f_path.dentry);
dev = obj->my_dev;
yaffs_gross_lock(dev);
@@ -1839,10 +1839,10 @@ static int yaffs_readdir(struct file *f,
if (offset == 1) {
yaffs_trace(YAFFS_TRACE_OS,
"yaffs_readdir: entry .. ino %d",
- (int)f->f_dentry->d_parent->d_inode->i_ino);
+ (int)f->f_path.dentry->d_parent->d_inode->i_ino);
yaffs_gross_unlock(dev);
if (filldir(dirent, "..", 2, offset,
- f->f_dentry->d_parent->d_inode->i_ino,
+ f->f_path.dentry->d_parent->d_inode->i_ino,
DT_DIR) < 0) {
yaffs_gross_lock(dev);
goto out;

View file

@ -0,0 +1,25 @@
From f31b7c0efa255dd17a5f584022a319387f09b0d8 Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jonas.gorski@gmail.com>
Date: Tue, 12 Apr 2011 19:55:41 +0200
Subject: [PATCH] squashfs: update xz compressor options struct.
Update the xz compressor options struct to match the squashfs userspace
one.
---
fs/squashfs/xz_wrapper.c | 4 +++-
1 files changed, 3 insertions(+), 1 deletions(-)
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -40,8 +40,10 @@ struct squashfs_xz {
};
struct disk_comp_opts {
- __le32 dictionary_size;
__le32 flags;
+ __le16 bit_opts;
+ __le16 fb;
+ __le32 dictionary_size;
};
struct comp_opts {

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,56 @@
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -114,6 +114,16 @@ static int jffs2_build_filesystem(struct
dbg_fsbuild("scanned flash completely\n");
jffs2_dbg_dump_block_lists_nolock(c);
+ if (c->flags & (1 << 7)) {
+ printk("%s(): unlocking the mtd device... ", __func__);
+ mtd_unlock(c->mtd, 0, c->mtd->size);
+ printk("done.\n");
+
+ printk("%s(): erasing all blocks after the end marker... ", __func__);
+ jffs2_erase_pending_blocks(c, -1);
+ printk("done.\n");
+ }
+
dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_in
/* reset summary info for next eraseblock scan */
jffs2_sum_reset_collected(s);
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
- buf_size, s);
+ if (c->flags & (1 << 7)) {
+ if (mtd_block_isbad(c->mtd, jeb->offset))
+ ret = BLK_STATE_BADBLOCK;
+ else
+ ret = BLK_STATE_ALLFF;
+ } else
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+ buf_size, s);
if (ret < 0)
goto out;
@@ -561,6 +567,17 @@ full_scan:
return err;
}
+ if ((buf[0] == 0xde) &&
+ (buf[1] == 0xad) &&
+ (buf[2] == 0xc0) &&
+ (buf[3] == 0xde)) {
+ /* end of filesystem. erase everything after this point */
+ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
+ c->flags |= (1 << 7);
+
+ return BLK_STATE_ALLFF;
+ }
+
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
ofs = 0;
max_ofs = EMPTY_SCAN_SIZE(c->sector_size);

View file

@ -0,0 +1,146 @@
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1536,6 +1536,13 @@ config CRYPTO_LZ4HC
help
This is the LZ4 high compression mode algorithm.
+config CRYPTO_XZ
+ tristate "XZ compression algorithm"
+ select CRYPTO_ALGAPI
+ select XZ_DEC
+ help
+ This is the XZ algorithm. Only decompression is supported for now.
+
comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
+obj-$(CONFIG_CRYPTO_XZ) += xz.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
--- /dev/null
+++ b/crypto/xz.c
@@ -0,0 +1,117 @@
+/*
+ * Cryptographic API.
+ *
+ * XZ decompression support.
+ *
+ * Copyright (c) 2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xz.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+
+struct xz_comp_ctx {
+ struct xz_dec *decomp_state;
+ struct xz_buf decomp_buf;
+};
+
+static int crypto_xz_decomp_init(struct xz_comp_ctx *ctx)
+{
+ ctx->decomp_state = xz_dec_init(XZ_SINGLE, 0);
+ if (!ctx->decomp_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void crypto_xz_decomp_exit(struct xz_comp_ctx *ctx)
+{
+ xz_dec_end(ctx->decomp_state);
+}
+
+static int crypto_xz_init(struct crypto_tfm *tfm)
+{
+ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_xz_decomp_init(ctx);
+}
+
+static void crypto_xz_exit(struct crypto_tfm *tfm)
+{
+ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_xz_decomp_exit(ctx);
+}
+
+static int crypto_xz_compress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static int crypto_xz_decompress(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+ struct xz_comp_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct xz_buf *xz_buf = &dctx->decomp_buf;
+ int ret;
+
+ memset(xz_buf, '\0', sizeof(struct xz_buf));
+
+ xz_buf->in = (u8 *) src;
+ xz_buf->in_pos = 0;
+ xz_buf->in_size = slen;
+ xz_buf->out = (u8 *) dst;
+ xz_buf->out_pos = 0;
+ xz_buf->out_size = *dlen;
+
+ ret = xz_dec_run(dctx->decomp_state, xz_buf);
+ if (ret != XZ_STREAM_END) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *dlen = xz_buf->out_pos;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static struct crypto_alg crypto_xz_alg = {
+ .cra_name = "xz",
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct xz_comp_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(crypto_xz_alg.cra_list),
+ .cra_init = crypto_xz_init,
+ .cra_exit = crypto_xz_exit,
+ .cra_u = { .compress = {
+ .coa_compress = crypto_xz_compress,
+ .coa_decompress = crypto_xz_decompress } }
+};
+
+static int __init crypto_xz_mod_init(void)
+{
+ return crypto_register_alg(&crypto_xz_alg);
+}
+
+static void __exit crypto_xz_mod_exit(void)
+{
+ crypto_unregister_alg(&crypto_xz_alg);
+}
+
+module_init(crypto_xz_mod_init);
+module_exit(crypto_xz_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto XZ decompression support");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
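A kernel-side consumer would reach this algorithm through the generic crypto_comp interface. Below is a minimal sketch, not part of the patch, assuming the caller already knows an upper bound for the decompressed size; the function name and error handling are illustrative only.

/* Sketch: decompress an XZ buffer via the "xz" crypto_comp algorithm
 * registered above. *dst_len carries the output capacity on entry and
 * the decompressed length on success. Illustrative code, not in the patch. */
#include <linux/crypto.h>
#include <linux/err.h>

static int xz_decompress_example(const u8 *src, unsigned int src_len,
				 u8 *dst, unsigned int *dst_len)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = crypto_alloc_comp("xz", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_decompress(tfm, src, src_len, dst, dst_len);
	crypto_free_comp(tfm);
	return ret;
}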

View file

@ -0,0 +1,92 @@
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -5,8 +5,10 @@ config UBIFS_FS
select CRYPTO if UBIFS_FS_ADVANCED_COMPR
select CRYPTO if UBIFS_FS_LZO
select CRYPTO if UBIFS_FS_ZLIB
+ select CRYPTO if UBIFS_FS_XZ
select CRYPTO_LZO if UBIFS_FS_LZO
select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
+ select CRYPTO_XZ if UBIFS_FS_XZ
depends on MTD_UBI
help
UBIFS is a file system for flash devices which works on top of UBI.
@@ -35,3 +37,12 @@ config UBIFS_FS_ZLIB
default y
help
Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
+
+config UBIFS_FS_XZ
+ bool "XZ decompression support" if UBIFS_FS_ADVANCED_COMPR
+ depends on UBIFS_FS
+ default y
+ help
+ XZ compresses better than ZLIB but it is slower.
+ Say 'Y' if unsure.
+
--- a/fs/ubifs/compress.c
+++ b/fs/ubifs/compress.c
@@ -71,6 +71,24 @@ static struct ubifs_compressor zlib_comp
};
#endif
+#ifdef CONFIG_UBIFS_FS_XZ
+static DEFINE_MUTEX(xz_enc_mutex);
+static DEFINE_MUTEX(xz_dec_mutex);
+
+static struct ubifs_compressor xz_compr = {
+ .compr_type = UBIFS_COMPR_XZ,
+ .comp_mutex = &xz_enc_mutex,
+ .decomp_mutex = &xz_dec_mutex,
+ .name = "xz",
+ .capi_name = "xz",
+};
+#else
+static struct ubifs_compressor xz_compr = {
+ .compr_type = UBIFS_COMPR_XZ,
+ .name = "xz",
+};
+#endif
+
/* All UBIFS compressors */
struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
@@ -232,9 +250,15 @@ int __init ubifs_compressors_init(void)
if (err)
goto out_lzo;
+ err = compr_init(&xz_compr);
+ if (err)
+ goto out_zlib;
+
ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr;
return 0;
+out_zlib:
+ compr_exit(&zlib_compr);
out_lzo:
compr_exit(&lzo_compr);
return err;
@@ -247,4 +271,5 @@ void ubifs_compressors_exit(void)
{
compr_exit(&lzo_compr);
compr_exit(&zlib_compr);
+ compr_exit(&xz_compr);
}
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -332,12 +332,14 @@ enum {
* UBIFS_COMPR_NONE: no compression
* UBIFS_COMPR_LZO: LZO compression
* UBIFS_COMPR_ZLIB: ZLIB compression
+ * UBIFS_COMPR_XZ: XZ compression
* UBIFS_COMPR_TYPES_CNT: count of supported compression types
*/
enum {
UBIFS_COMPR_NONE,
UBIFS_COMPR_LZO,
UBIFS_COMPR_ZLIB,
+ UBIFS_COMPR_XZ,
UBIFS_COMPR_TYPES_CNT,
};

View file

@ -0,0 +1,29 @@
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -63,6 +63,17 @@
/* Default time granularity in nanoseconds */
#define DEFAULT_TIME_GRAN 1000000000
+static int get_default_compressor(void)
+{
+ if (ubifs_compr_present(UBIFS_COMPR_LZO))
+ return UBIFS_COMPR_LZO;
+
+ if (ubifs_compr_present(UBIFS_COMPR_ZLIB))
+ return UBIFS_COMPR_ZLIB;
+
+ return UBIFS_COMPR_NONE;
+}
+
/**
* create_default_filesystem - format empty UBI volume.
* @c: UBIFS file-system description object
@@ -183,7 +194,7 @@ static int create_default_filesystem(str
if (c->mount_opts.override_compr)
sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
else
- sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
+ sup->default_compr = cpu_to_le16(get_default_compressor());
generate_random_uuid(sup->uuid);

View file

@ -0,0 +1,86 @@
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -17,6 +17,7 @@
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/security.h>
+#include <linux/inet.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
@@ -288,10 +289,66 @@ static int ct_open(struct inode *inode,
sizeof(struct ct_iter_state));
}
+struct kill_request {
+ u16 family;
+ union nf_inet_addr addr;
+};
+
+static int kill_matching(struct nf_conn *i, void *data)
+{
+ struct kill_request *kr = data;
+ struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ if (!kr->family)
+ return 1;
+
+ if (t1->src.l3num != kr->family)
+ return 0;
+
+ return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) ||
+ nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) ||
+ nf_inet_addr_cmp(&kr->addr, &t2->src.u3) ||
+ nf_inet_addr_cmp(&kr->addr, &t2->dst.u3));
+}
+
+static ssize_t ct_file_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = file->private_data;
+ struct net *net = seq_file_net(seq);
+ struct kill_request kr = { };
+ char req[INET6_ADDRSTRLEN] = { };
+
+ if (count == 0)
+ return 0;
+
+ if (count >= INET6_ADDRSTRLEN)
+ count = INET6_ADDRSTRLEN - 1;
+
+ if (copy_from_user(req, buf, count))
+ return -EFAULT;
+
+ if (strnchr(req, count, ':')) {
+ kr.family = AF_INET6;
+ if (!in6_pton(req, count, (void *)&kr.addr, '\n', NULL))
+ return -EINVAL;
+ } else if (strnchr(req, count, '.')) {
+ kr.family = AF_INET;
+ if (!in4_pton(req, count, (void *)&kr.addr, '\n', NULL))
+ return -EINVAL;
+ }
+
+ nf_ct_iterate_cleanup(net, kill_matching, &kr, 0, 0);
+
+ return count;
+}
+
static const struct file_operations ct_file_ops = {
.owner = THIS_MODULE,
.open = ct_open,
.read = seq_read,
+ .write = ct_file_write,
.llseek = seq_lseek,
.release = seq_release_net,
};
@@ -393,7 +450,7 @@ static int nf_conntrack_standalone_init_
{
struct proc_dir_entry *pde;
- pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
+ pde = proc_create("nf_conntrack", 0660, net->proc_net, &ct_file_ops);
if (!pde)
goto out_nf_conntrack;
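The mode change above is what makes the new write handler reachable from userspace: a privileged process can flush every conntrack entry that references a given address by writing that address to /proc/net/nf_conntrack. A hedged userspace sketch with a placeholder address, not part of the patch:

/* Sketch: flush conntrack entries matching one address by writing it to
 * /proc/net/nf_conntrack (needs root; relies on the 0660 mode above). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *addr = "192.0.2.1\n";	/* placeholder example address */
	int fd = open("/proc/net/nf_conntrack", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, addr, strlen(addr)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}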

View file

@ -0,0 +1,93 @@
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -87,6 +87,7 @@ struct ipt_ip {
#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
#define IPT_F_GOTO 0x02 /* Set if jump is a goto */
#define IPT_F_MASK 0x03 /* All possible flag bits mask. */
+#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
/* Values for "inv" field in struct ipt_ip. */
#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -82,6 +82,9 @@ ip_packet_match(const struct iphdr *ip,
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
IPT_INV_SRCIP) ||
FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
@@ -135,6 +138,29 @@ ip_packet_match(const struct iphdr *ip,
return true;
}
+static void
+ip_checkdefault(struct ipt_ip *ip)
+{
+ static const char iface_mask[IFNAMSIZ] = {};
+
+ if (ip->invflags || ip->flags & IPT_F_FRAG)
+ return;
+
+ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
+ return;
+
+ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
+ return;
+
+ if (ip->smsk.s_addr || ip->dmsk.s_addr)
+ return;
+
+ if (ip->proto)
+ return;
+
+ ip->flags |= IPT_F_NO_DEF_MATCH;
+}
+
static bool
ip_checkentry(const struct ipt_ip *ip)
{
@@ -568,7 +594,7 @@ static void cleanup_match(struct xt_entr
}
static int
-check_entry(const struct ipt_entry *e, const char *name)
+check_entry(struct ipt_entry *e, const char *name)
{
const struct xt_entry_target *t;
@@ -577,6 +603,8 @@ check_entry(const struct ipt_entry *e, c
return -EINVAL;
}
+ ip_checkdefault(&e->ip);
+
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
return -EINVAL;
@@ -943,6 +971,7 @@ copy_entries_to_user(unsigned int total_
const struct xt_table_info *private = table->private;
int ret = 0;
const void *loc_cpu_entry;
+ u8 flags;
counters = alloc_counters(table);
if (IS_ERR(counters))
@@ -969,6 +998,14 @@ copy_entries_to_user(unsigned int total_
ret = -EFAULT;
goto free_counters;
}
+
+ flags = e->ip.flags & IPT_F_MASK;
+ if (copy_to_user(userptr + off
+ + offsetof(struct ipt_entry, ip.flags),
+ &flags, sizeof(flags)) != 0) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
for (i = sizeof(struct ipt_entry);
i < e->target_offset;

View file

@ -0,0 +1,106 @@
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -308,6 +308,33 @@ struct ipt_entry *ipt_next_entry(const s
return (void *)entry + entry->next_offset;
}
+static bool
+ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
+{
+ struct xt_entry_target *t;
+ struct xt_standard_target *st;
+
+ if (e->target_offset != sizeof(struct ipt_entry))
+ return false;
+
+ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
+ return false;
+
+ t = ipt_get_target(e);
+ if (t->u.kernel.target->target)
+ return false;
+
+ st = (struct xt_standard_target *) t;
+ if (st->verdict == XT_RETURN)
+ return false;
+
+ if (st->verdict >= 0)
+ return false;
+
+ *verdict = (unsigned)(-st->verdict) - 1;
+ return true;
+}
+
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
@@ -328,27 +355,8 @@ ipt_do_table(struct sk_buff *skb,
unsigned int addend;
/* Initialization */
- stackidx = 0;
- ip = ip_hdr(skb);
- indev = state->in ? state->in->name : nulldevname;
- outdev = state->out ? state->out->name : nulldevname;
- /* We handle fragments by dealing with the first fragment as
- * if it was a normal packet. All other fragments are treated
- * normally, except that they will NEVER match rules that ask
- * things we don't know, ie. tcp syn flag or ports). If the
- * rule is also a fragment-specific rule, non-fragments won't
- * match it. */
- acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
- acpar.thoff = ip_hdrlen(skb);
- acpar.hotdrop = false;
- acpar.in = state->in;
- acpar.out = state->out;
- acpar.family = NFPROTO_IPV4;
- acpar.hooknum = hook;
-
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
local_bh_disable();
- addend = xt_write_recseq_begin();
private = table->private;
cpu = smp_processor_id();
/*
@@ -357,6 +365,20 @@ ipt_do_table(struct sk_buff *skb,
*/
smp_read_barrier_depends();
table_base = private->entries;
+
+ e = get_entry(table_base, private->hook_entry[hook]);
+ if (ipt_handle_default_rule(e, &verdict)) {
+ ADD_COUNTER(e->counters, skb->len, 1);
+ local_bh_enable();
+ return verdict;
+ }
+
+ stackidx = 0;
+ ip = ip_hdr(skb);
+ indev = state->in ? state->in->name : nulldevname;
+ outdev = state->out ? state->out->name : nulldevname;
+
+ addend = xt_write_recseq_begin();
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
/* Switch to alternate jumpstack if we're being invoked via TEE.
@@ -369,7 +391,19 @@ ipt_do_table(struct sk_buff *skb,
if (static_key_false(&xt_tee_enabled))
jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
- e = get_entry(table_base, private->hook_entry[hook]);
+ /* We handle fragments by dealing with the first fragment as
+ * if it was a normal packet. All other fragments are treated
+ * normally, except that they will NEVER match rules that ask
+ * things we don't know, ie. tcp syn flag or ports). If the
+ * rule is also a fragment-specific rule, non-fragments won't
+ * match it. */
+ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+ acpar.thoff = ip_hdrlen(skb);
+ acpar.hotdrop = false;
+ acpar.in = state->in;
+ acpar.out = state->out;
+ acpar.family = NFPROTO_IPV4;
+ acpar.hooknum = hook;
pr_debug("Entering %s(hook %u), UF %p\n",
table->name, hook,

View file

@ -0,0 +1,16 @@
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -85,9 +85,11 @@ ip_packet_match(const struct iphdr *ip,
if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
return true;
- if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ if (FWINV(ipinfo->smsk.s_addr &&
+ (ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
IPT_INV_SRCIP) ||
- FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+ FWINV(ipinfo->dmsk.s_addr &&
+ (ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
IPT_INV_DSTIP)) {
dprintf("Source or dest mismatch.\n");

View file

@ -0,0 +1,36 @@
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -33,6 +33,9 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+/* Do not check the TCP window for incoming packets */
+static int nf_ct_tcp_no_window_check __read_mostly = 1;
+
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
@@ -515,6 +518,9 @@ static bool tcp_in_window(const struct n
s32 receiver_offset;
bool res, in_recv_win;
+ if (nf_ct_tcp_no_window_check)
+ return true;
+
/*
* Get the required data from the packet.
*/
@@ -1481,6 +1487,13 @@ static struct ctl_table tcp_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "nf_conntrack_tcp_no_window_check",
+ .data = &nf_ct_tcp_no_window_check,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{ }
};
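Because the knob sits in the standard nf_conntrack sysctl table, it can be flipped at runtime. A hedged sketch follows; the proc path is assumed from the usual net.netfilter sysctl prefix and is not spelled out in the patch itself.

/* Sketch: turn TCP window checking back on (the patch defaults the knob
 * to 1, i.e. checks skipped). Path is an assumption, see note above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/netfilter/nf_conntrack_tcp_no_window_check", "w");

	if (!f)
		return 1;
	fputs("0\n", f);	/* 0 = enforce window checks, 1 = skip */
	return fclose(f) ? 1 : 0;
}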

View file

@ -0,0 +1,95 @@
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -55,6 +55,7 @@ header-y += xt_ecn.h
header-y += xt_esp.h
header-y += xt_hashlimit.h
header-y += xt_helper.h
+header-y += xt_id.h
header-y += xt_ipcomp.h
header-y += xt_iprange.h
header-y += xt_ipvs.h
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_id.h
@@ -0,0 +1,8 @@
+#ifndef _XT_ID_H
+#define _XT_ID_H
+
+struct xt_id_info {
+ u32 id;
+};
+
+#endif /* XT_ID_H */
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1179,6 +1179,13 @@ config NETFILTER_XT_MATCH_IPCOMP
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_ID
+ tristate '"id" match support'
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds an `id' dummy-match, which allows you to put
+ numeric IDs into your iptables ruleset.
+
config NETFILTER_XT_MATCH_IPRANGE
tristate '"iprange" address range match support'
depends on NETFILTER_ADVANCED
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -146,6 +146,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) +=
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_ID) += xt_id.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPCOMP) += xt_ipcomp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
--- /dev/null
+++ b/net/netfilter/xt_id.c
@@ -0,0 +1,45 @@
+/*
+ * Implements a dummy match to allow attaching IDs to rules
+ *
+ * 2014-08-01 Jo-Philipp Wich <jow@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_id.h>
+
+MODULE_AUTHOR("Jo-Philipp Wich <jow@openwrt.org>");
+MODULE_DESCRIPTION("Xtables: No-op match which can be tagged with a 32bit ID");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_id");
+MODULE_ALIAS("ip6t_id");
+
+static bool
+id_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ /* We always match */
+ return true;
+}
+
+static struct xt_match id_mt_reg __read_mostly = {
+ .name = "id",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .match = id_mt,
+ .matchsize = sizeof(struct xt_id_info),
+ .me = THIS_MODULE,
+};
+
+static int __init id_mt_init(void)
+{
+ return xt_register_match(&id_mt_reg);
+}
+
+static void __exit id_mt_exit(void)
+{
+ xt_unregister_match(&id_mt_reg);
+}
+
+module_init(id_mt_init);
+module_exit(id_mt_exit);

View file

@ -0,0 +1,12 @@
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -90,6 +90,9 @@ int nf_xfrm_me_harder(struct sk_buff *sk
struct dst_entry *dst;
int err;
+ if (skb->dev && !dev_net(skb->dev)->xfrm.policy_count[XFRM_POLICY_OUT])
+ return 0;
+
err = xfrm_decode_session(skb, &fl, family);
if (err < 0)
return err;

View file

@ -0,0 +1,791 @@
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -226,6 +226,33 @@ struct tc_sfq_xstats {
__s32 allot;
};
+/* ESFQ section */
+
+enum
+{
+ /* traditional */
+ TCA_SFQ_HASH_CLASSIC,
+ TCA_SFQ_HASH_DST,
+ TCA_SFQ_HASH_SRC,
+ TCA_SFQ_HASH_FWMARK,
+ /* conntrack */
+ TCA_SFQ_HASH_CTORIGDST,
+ TCA_SFQ_HASH_CTORIGSRC,
+ TCA_SFQ_HASH_CTREPLDST,
+ TCA_SFQ_HASH_CTREPLSRC,
+ TCA_SFQ_HASH_CTNATCHG,
+};
+
+struct tc_esfq_qopt
+{
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+ unsigned hash_kind; /* Hash function to use for flow identification */
+};
+
/* RED section */
enum {
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -149,6 +149,37 @@ config NET_SCH_SFQ
To compile this code as a module, choose M here: the
module will be called sch_sfq.
+config NET_SCH_ESFQ
+ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
+ ---help---
+ Say Y here if you want to use the Enhanced Stochastic Fairness
+ Queueing (ESFQ) packet scheduling algorithm for some of your network
+ devices or as a leaf discipline for a classful qdisc such as HTB or
+ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
+ references to the SFQ algorithm).
+
+ This is an enhanced SFQ version which allows you to control some
+ hardcoded values in the SFQ scheduler.
+
+ ESFQ also adds control of the hash function used to identify packet
+ flows. The original SFQ discipline hashes by connection; ESFQ adds
+ several other hashing methods, such as by src IP or by dst IP, which
+ can be more fair to users in some networking situations.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_esfq.
+
+config NET_SCH_ESFQ_NFCT
+ bool "Connection Tracking Hash Types"
+ depends on NET_SCH_ESFQ && NF_CONNTRACK
+ ---help---
+ Say Y here to enable support for hashing based on netfilter connection
+ tracking information. This is useful for a router that is also using
+ NAT to connect privately-addressed hosts to the Internet. If you want
+ to provide fair distribution of upstream bandwidth, ESFQ must use
+ connection tracking information, since all outgoing packets will share
+ the same source address.
+
config NET_SCH_TEQL
tristate "True Link Equalizer (TEQL)"
---help---
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_SCH_INGRESS) += sch_ing
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
--- /dev/null
+++ b/net/sched/sch_esfq.c
@@ -0,0 +1,702 @@
+/*
+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes: Alexander Atanasov, <alex@ssi.bg>
+ * Added dynamic depth,limit,divisor,hash_kind options.
+ * Added dst and src hashes.
+ *
+ * Alexander Clouter, <alex@digriz.org.uk>
+ * Ported ESFQ to Linux 2.6.
+ *
+ * Corey Hickey, <bugfood-c@fatooh.org>
+ * Maintenance of the Linux 2.6 port.
+ * Added fwmark hash (thanks to Robert Kurjata).
+ * Added usage of jhash.
+ * Added conntrack support.
+ * Added ctnatchg hash (thanks to Ben Pfountz).
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <net/ip.h>
+#include <net/netlink.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <linux/jhash.h>
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+/* Stochastic Fairness Queuing algorithm.
+ For more comments look at sch_sfq.c.
+ The difference is that you can change limit, depth,
+ hash table size and choose alternate hash types.
+
+ classic: same as in sch_sfq.c
+ dst: destination IP address
+ src: source IP address
+ fwmark: netfilter mark value
+ ctorigdst: original destination IP address
+ ctorigsrc: original source IP address
+ ctrepldst: reply destination IP address
+ ctreplsrc: reply source IP address
+
+*/
+
+#define ESFQ_HEAD 0
+#define ESFQ_TAIL 1
+
+/* This type should contain at least SFQ_DEPTH*2 values */
+typedef unsigned int esfq_index;
+
+struct esfq_head
+{
+ esfq_index next;
+ esfq_index prev;
+};
+
+struct esfq_sched_data
+{
+/* Parameters */
+ int perturb_period;
+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ int limit;
+ unsigned depth;
+ unsigned hash_divisor;
+ unsigned hash_kind;
+/* Variables */
+ struct timer_list perturb_timer;
+ int perturbation;
+ esfq_index tail; /* Index of current slot in round */
+ esfq_index max_depth; /* Maximal depth */
+
+ esfq_index *ht; /* Hash table */
+ esfq_index *next; /* Active slots link */
+ short *allot; /* Current allotment per slot */
+ unsigned short *hash; /* Hash value indexed by slots */
+ struct sk_buff_head *qs; /* Slot queue */
+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
+};
+
+/* This contains the info we will hash. */
+struct esfq_packet_info
+{
+ u32 proto; /* protocol or port */
+ u32 src; /* source from packet header */
+ u32 dst; /* destination from packet header */
+ u32 ctorigsrc; /* original source from conntrack */
+ u32 ctorigdst; /* original destination from conntrack */
+ u32 ctreplsrc; /* reply source from conntrack */
+ u32 ctrepldst; /* reply destination from conntrack */
+ u32 mark; /* netfilter mark (fwmark) */
+};
+
+static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
+{
+ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
+{
+ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
+{
+ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
+}
+
+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
+{
+ struct esfq_packet_info info;
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+#endif
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ {
+ struct iphdr *iph = ip_hdr(skb);
+ info.dst = iph->daddr;
+ info.src = iph->saddr;
+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
+ (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP ||
+ iph->protocol == IPPROTO_SCTP ||
+ iph->protocol == IPPROTO_DCCP ||
+ iph->protocol == IPPROTO_ESP))
+ info.proto = *(((u32*)iph) + iph->ihl);
+ else
+ info.proto = iph->protocol;
+ break;
+ }
+ case __constant_htons(ETH_P_IPV6):
+ {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ /* Hash ipv6 addresses into a u32. This isn't ideal,
+ * but the code is simple. */
+ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
+ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
+ if (iph->nexthdr == IPPROTO_TCP ||
+ iph->nexthdr == IPPROTO_UDP ||
+ iph->nexthdr == IPPROTO_SCTP ||
+ iph->nexthdr == IPPROTO_DCCP ||
+ iph->nexthdr == IPPROTO_ESP)
+ info.proto = *(u32*)&iph[1];
+ else
+ info.proto = iph->nexthdr;
+ break;
+ }
+ default:
+ info.dst = (u32)(unsigned long)skb_dst(skb);
+ info.src = (u32)(unsigned long)skb->sk;
+ info.proto = skb->protocol;
+ }
+
+ info.mark = skb->mark;
+
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ /* defaults if there is no conntrack info */
+ info.ctorigsrc = info.src;
+ info.ctorigdst = info.dst;
+ info.ctreplsrc = info.dst;
+ info.ctrepldst = info.src;
+ /* collect conntrack info */
+ if (ct && ct != &nf_conntrack_untracked) {
+ if (skb->protocol == __constant_htons(ETH_P_IP)) {
+ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
+ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
+ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
+ }
+ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+ /* Again, hash ipv6 addresses into a single u32. */
+ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
+ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
+ }
+
+ }
+#endif
+
+ switch(q->hash_kind) {
+ case TCA_SFQ_HASH_CLASSIC:
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+ case TCA_SFQ_HASH_DST:
+ return esfq_jhash_1word(q, info.dst);
+ case TCA_SFQ_HASH_SRC:
+ return esfq_jhash_1word(q, info.src);
+ case TCA_SFQ_HASH_FWMARK:
+ return esfq_jhash_1word(q, info.mark);
+#ifdef CONFIG_NET_SCH_ESFQ_NFCT
+ case TCA_SFQ_HASH_CTORIGDST:
+ return esfq_jhash_1word(q, info.ctorigdst);
+ case TCA_SFQ_HASH_CTORIGSRC:
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ case TCA_SFQ_HASH_CTREPLDST:
+ return esfq_jhash_1word(q, info.ctrepldst);
+ case TCA_SFQ_HASH_CTREPLSRC:
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ case TCA_SFQ_HASH_CTNATCHG:
+ {
+ if (info.ctorigdst == info.ctreplsrc)
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ return esfq_jhash_1word(q, info.ctreplsrc);
+ }
+#endif
+ default:
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
+ }
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+}
+
+static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d = q->qs[x].qlen + q->depth;
+
+ p = d;
+ n = q->dep[d].next;
+ q->dep[x].next = n;
+ q->dep[x].prev = p;
+ q->dep[p].next = q->dep[n].prev = x;
+}
+
+static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+
+ if (n == p && q->max_depth == q->qs[x].qlen + 1)
+ q->max_depth--;
+
+ esfq_link(q, x);
+}
+
+static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+ d = q->qs[x].qlen;
+ if (q->max_depth < d)
+ q->max_depth = d;
+
+ esfq_link(q, x);
+}
+
+static unsigned int esfq_drop(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index d = q->max_depth;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ /* Queue is full! Find the longest slot and
+ drop a packet from it */
+
+ if (d > 1) {
+ esfq_index x = q->dep[d+q->depth].next;
+ skb = q->qs[x].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[x]);
+ kfree_skb(skb);
+ esfq_dec(q, x);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ if (d == 1) {
+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
+ d = q->next[q->tail];
+ q->next[q->tail] = q->next[d];
+ q->allot[q->next[d]] += q->quantum;
+ skb = q->qs[d].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[d]);
+ kfree_skb(skb);
+ esfq_dec(q, d);
+ sch->q.qlen--;
+ q->ht[q->hash[d]] = q->depth;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ return 0;
+}
+
+static void esfq_q_enqueue(struct sk_buff *skb, struct esfq_sched_data *q, unsigned int end)
+{
+ unsigned hash = esfq_hash(q, skb);
+ unsigned depth = q->depth;
+ esfq_index x;
+
+ x = q->ht[hash];
+ if (x == depth) {
+ q->ht[hash] = x = q->dep[depth].next;
+ q->hash[x] = hash;
+ }
+
+ if (end == ESFQ_TAIL)
+ __skb_queue_tail(&q->qs[x], skb);
+ else
+ __skb_queue_head(&q->qs[x], skb);
+
+ esfq_inc(q, x);
+ if (q->qs[x].qlen == 1) { /* The flow is new */
+ if (q->tail == depth) { /* It is the first flow */
+ q->tail = x;
+ q->next[x] = x;
+ q->allot[x] = q->quantum;
+ } else {
+ q->next[x] = q->next[q->tail];
+ q->next[q->tail] = x;
+ q->tail = x;
+ }
+ }
+}
+
+static int esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_enqueue(skb, q, ESFQ_TAIL);
+ sch->qstats.backlog += skb->len;
+ if (++sch->q.qlen < q->limit-1) {
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+ return 0;
+ }
+
+ sch->qstats.drops++;
+ esfq_drop(sch);
+ return NET_XMIT_CN;
+}
+
+static struct sk_buff *esfq_peek(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index a;
+
+ /* No active slots */
+ if (q->tail == q->depth)
+ return NULL;
+
+ a = q->next[q->tail];
+ return skb_peek(&q->qs[a]);
+}
+
+static struct sk_buff *esfq_q_dequeue(struct esfq_sched_data *q)
+{
+ struct sk_buff *skb;
+ unsigned depth = q->depth;
+ esfq_index a, old_a;
+
+ /* No active slots */
+ if (q->tail == depth)
+ return NULL;
+
+ a = old_a = q->next[q->tail];
+
+ /* Grab packet */
+ skb = __skb_dequeue(&q->qs[a]);
+ esfq_dec(q, a);
+
+ /* Is the slot empty? */
+ if (q->qs[a].qlen == 0) {
+ q->ht[q->hash[a]] = depth;
+ a = q->next[a];
+ if (a == old_a) {
+ q->tail = depth;
+ return skb;
+ }
+ q->next[q->tail] = a;
+ q->allot[a] += q->quantum;
+ } else if ((q->allot[a] -= skb->len) <= 0) {
+ q->tail = a;
+ a = q->next[a];
+ q->allot[a] += q->quantum;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *esfq_dequeue(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = esfq_q_dequeue(q);
+ if (skb == NULL)
+ return NULL;
+ sch->q.qlen--;
+ sch->qstats.backlog -= skb->len;
+ return skb;
+}
+
+static void esfq_q_destroy(struct esfq_sched_data *q)
+{
+ del_timer(&q->perturb_timer);
+ if(q->ht)
+ kfree(q->ht);
+ if(q->dep)
+ kfree(q->dep);
+ if(q->next)
+ kfree(q->next);
+ if(q->allot)
+ kfree(q->allot);
+ if(q->hash)
+ kfree(q->hash);
+ if(q->qs)
+ kfree(q->qs);
+}
+
+static void esfq_destroy(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_q_destroy(q);
+}
+
+
+static void esfq_reset(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = esfq_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static void esfq_perturbation(unsigned long arg)
+{
+ struct Qdisc *sch = (struct Qdisc*)arg;
+ struct esfq_sched_data *q = qdisc_priv(sch);
+
+ q->perturbation = prandom_u32()&0x1F;
+
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+}
+
+static unsigned int esfq_check_hash(unsigned int kind)
+{
+ switch (kind) {
+ case TCA_SFQ_HASH_CTORIGDST:
+ case TCA_SFQ_HASH_CTORIGSRC:
+ case TCA_SFQ_HASH_CTREPLDST:
+ case TCA_SFQ_HASH_CTREPLSRC:
+ case TCA_SFQ_HASH_CTNATCHG:
+#ifndef CONFIG_NET_SCH_ESFQ_NFCT
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Conntrack hash types disabled in kernel config. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+#endif
+ case TCA_SFQ_HASH_CLASSIC:
+ case TCA_SFQ_HASH_DST:
+ case TCA_SFQ_HASH_SRC:
+ case TCA_SFQ_HASH_FWMARK:
+ return kind;
+ default:
+ {
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash type. Falling back to classic.\n");
+ return TCA_SFQ_HASH_CLASSIC;
+ }
+ }
+}
+
+static int esfq_q_init(struct esfq_sched_data *q, struct nlattr *opt)
+{
+ struct tc_esfq_qopt *ctl = nla_data(opt);
+ esfq_index p = ~0U/2;
+ int i;
+
+ if (opt && opt->nla_len < nla_attr_size(sizeof(*ctl)))
+ return -EINVAL;
+
+ q->perturbation = 0;
+ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
+ q->max_depth = 0;
+ if (opt == NULL) {
+ q->perturb_period = 0;
+ q->hash_divisor = 1024;
+ q->tail = q->limit = q->depth = 128;
+
+ } else {
+ struct tc_esfq_qopt *ctl = nla_data(opt);
+ if (ctl->quantum)
+ q->quantum = ctl->quantum;
+ q->perturb_period = ctl->perturb_period*HZ;
+ q->hash_divisor = ctl->divisor ? : 1024;
+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
+
+ if ( q->depth > p - 1 )
+ return -EINVAL;
+
+ if (ctl->limit)
+ q->limit = min_t(u32, ctl->limit, q->depth);
+
+ if (ctl->hash_kind) {
+ q->hash_kind = esfq_check_hash(ctl->hash_kind);
+ }
+ }
+
+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->ht)
+ goto err_case;
+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
+ if (!q->dep)
+ goto err_case;
+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->next)
+ goto err_case;
+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
+ if (!q->allot)
+ goto err_case;
+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
+ if (!q->hash)
+ goto err_case;
+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!q->qs)
+ goto err_case;
+
+ for (i=0; i< q->hash_divisor; i++)
+ q->ht[i] = q->depth;
+ for (i=0; i<q->depth; i++) {
+ skb_queue_head_init(&q->qs[i]);
+ q->dep[i+q->depth].next = i+q->depth;
+ q->dep[i+q->depth].prev = i+q->depth;
+ }
+
+ for (i=0; i<q->depth; i++)
+ esfq_link(q, i);
+ return 0;
+err_case:
+ esfq_q_destroy(q);
+ return -ENOBUFS;
+}
+
+static int esfq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ q->quantum = psched_mtu(qdisc_dev(sch)); /* default */
+ if ((err = esfq_q_init(q, opt)))
+ return err;
+
+ init_timer(&q->perturb_timer);
+ q->perturb_timer.data = (unsigned long)sch;
+ q->perturb_timer.function = esfq_perturbation;
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+
+ return 0;
+}
+
+static int esfq_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct esfq_sched_data new;
+ struct sk_buff *skb;
+ int err;
+
+ /* set up new queue */
+ memset(&new, 0, sizeof(struct esfq_sched_data));
+ new.quantum = psched_mtu(qdisc_dev(sch)); /* default */
+ if ((err = esfq_q_init(&new, opt)))
+ return err;
+
+ /* copy all packets from the old queue to the new queue */
+ sch_tree_lock(sch);
+ while ((skb = esfq_q_dequeue(q)) != NULL)
+ esfq_q_enqueue(skb, &new, ESFQ_TAIL);
+
+ /* clean up the old queue */
+ esfq_q_destroy(q);
+
+ /* copy elements of the new queue into the old queue */
+ q->perturb_period = new.perturb_period;
+ q->quantum = new.quantum;
+ q->limit = new.limit;
+ q->depth = new.depth;
+ q->hash_divisor = new.hash_divisor;
+ q->hash_kind = new.hash_kind;
+ q->tail = new.tail;
+ q->max_depth = new.max_depth;
+ q->ht = new.ht;
+ q->dep = new.dep;
+ q->next = new.next;
+ q->allot = new.allot;
+ q->hash = new.hash;
+ q->qs = new.qs;
+
+ /* finish up */
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ } else {
+ q->perturbation = 0;
+ }
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_esfq_qopt opt;
+
+ opt.quantum = q->quantum;
+ opt.perturb_period = q->perturb_period/HZ;
+
+ opt.limit = q->limit;
+ opt.divisor = q->hash_divisor;
+ opt.flows = q->depth;
+ opt.hash_kind = q->hash_kind;
+
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct Qdisc_ops esfq_qdisc_ops =
+{
+ .next = NULL,
+ .cl_ops = NULL,
+ .id = "esfq",
+ .priv_size = sizeof(struct esfq_sched_data),
+ .enqueue = esfq_enqueue,
+ .dequeue = esfq_dequeue,
+ .peek = esfq_peek,
+ .drop = esfq_drop,
+ .init = esfq_init,
+ .reset = esfq_reset,
+ .destroy = esfq_destroy,
+ .change = esfq_change,
+ .dump = esfq_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init esfq_module_init(void)
+{
+ return register_qdisc(&esfq_qdisc_ops);
+}
+static void __exit esfq_module_exit(void)
+{
+ unregister_qdisc(&esfq_qdisc_ops);
+}
+module_init(esfq_module_init)
+module_exit(esfq_module_exit)
+MODULE_LICENSE("GPL");

Some files were not shown because too many files have changed in this diff