openwrtv4/package/madwifi/patches/342-performance.patch

--- a/ath/if_ath.c
+++ b/ath/if_ath.c
@@ -3239,7 +3239,6 @@ ath_hardstart(struct sk_buff *skb, struc
struct ath_softc *sc = dev->priv;
struct ieee80211_node *ni = NULL;
struct ath_buf *bf = NULL;
- struct ether_header *eh;
ath_bufhead bf_head;
struct ath_buf *tbf, *tempbf;
struct sk_buff *tskb;
@@ -3251,6 +3250,7 @@ ath_hardstart(struct sk_buff *skb, struc
*/
int requeue = 0;
#ifdef ATH_SUPERG_FF
+ struct ether_header *eh;
unsigned int pktlen;
struct ieee80211com *ic = &sc->sc_ic;
struct ath_node *an;
@@ -3316,27 +3316,9 @@ ath_hardstart(struct sk_buff *skb, struc
requeue = 1;
goto hardstart_fail;
}
-#endif
- /* If the skb data is shared, we will copy it so we can strip padding
- * without affecting any other bridge ports. */
- if (skb_cloned(skb)) {
- /* Remember the original SKB so we can free up our references */
- struct sk_buff *skb_new;
- skb_new = skb_copy(skb, GFP_ATOMIC);
- if (skb_new == NULL) {
- DPRINTF(sc, ATH_DEBUG_XMIT,
- "Dropping; skb_copy failure.\n");
- /* No free RAM, do not requeue! */
- goto hardstart_fail;
- }
- ieee80211_skb_copy_noderef(skb, skb_new);
- ieee80211_dev_kfree_skb(&skb);
- skb = skb_new;
- }
eh = (struct ether_header *)skb->data;
-#ifdef ATH_SUPERG_FF
/* NB: use this lock to protect an->an_tx_ffbuf (and txq->axq_stageq)
* in athff_can_aggregate() call too. */
ATH_TXQ_LOCK_IRQ(txq);
--- a/net80211/ieee80211_output.c
+++ b/net80211/ieee80211_output.c
@@ -283,7 +283,7 @@ ieee80211_hardstart(struct sk_buff *skb,
* normal vap. */
if (vap->iv_xrvap && (ni == vap->iv_bss) &&
vap->iv_xrvap->iv_sta_assoc) {
- struct sk_buff *skb1 = skb_copy(skb, GFP_ATOMIC);
+ struct sk_buff *skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1) {
memset(SKB_CB(skb1), 0, sizeof(struct ieee80211_cb));
#ifdef IEEE80211_DEBUG_REFCNT
@@ -566,7 +566,7 @@ ieee80211_skbhdr_adjust(struct ieee80211
struct ieee80211_key *key, struct sk_buff *skb, int ismulticast)
{
/* XXX pre-calculate per node? */
- int need_headroom = LLC_SNAPFRAMELEN + hdrsize + IEEE80211_ADDR_LEN;
+ int need_headroom = LLC_SNAPFRAMELEN + hdrsize;
int need_tailroom = 0;
#ifdef ATH_SUPERG_FF
int isff = ATH_FF_MAGIC_PRESENT(skb);
@@ -608,109 +608,56 @@ ieee80211_skbhdr_adjust(struct ieee80211
need_tailroom += cip->ic_miclen;
}
- if (skb_shared(skb)) {
- /* Take our own reference to the node in the clone */
- ieee80211_ref_node(SKB_CB(skb)->ni);
- /* Unshare the node, decrementing users in the old skb */
- skb = skb_unshare(skb, GFP_ATOMIC);
- }
+ need_headroom -= skb_headroom(skb);
+ if (isff)
+ need_tailroom -= skb_tailroom(skb2);
+ else
+ need_tailroom -= skb_tailroom(skb);
+
+ if (need_headroom < 0)
+ need_headroom = 0;
+ if (need_tailroom < 0)
+ need_tailroom = 0;
-#ifdef ATH_SUPERG_FF
- if (isff) {
- if (skb == NULL) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: cannot unshare for encapsulation\n",
- __func__);
- vap->iv_stats.is_tx_nobuf++;
- ieee80211_dev_kfree_skb(&skb2);
+ if (skb_cloned(skb) || (need_headroom > 0) ||
+ (!isff && (need_tailroom > 0))) {
- return NULL;
+ if (pskb_expand_head(skb, need_headroom, need_tailroom, GFP_ATOMIC)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "%s: cannot expand storage (tail)\n", __func__);
+ goto error;
}
+ }
- /* first skb header */
- if (skb_headroom(skb) < need_headroom) {
- struct sk_buff *tmp = skb;
- skb = skb_realloc_headroom(skb, need_headroom);
- if (skb == NULL) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: cannot expand storage (head1)\n",
- __func__);
- vap->iv_stats.is_tx_nobuf++;
- ieee80211_dev_kfree_skb(&skb2);
- return NULL;
- } else
- ieee80211_skb_copy_noderef(tmp, skb);
- ieee80211_dev_kfree_skb(&tmp);
- /* NB: cb[] area was copied, but not next ptr. must do that
- * prior to return on success. */
- }
+#ifdef ATH_SUPERG_FF
+ if (isff) {
+ inter_headroom -= skb_headroom(skb2);
+ if (inter_headroom < 0)
+ inter_headroom = 0;
+ if ((skb_cloned(skb2) ||
+ (inter_headroom > 0) || (need_tailroom > 0))) {
- /* second skb with header and tail adjustments possible */
- if (skb_tailroom(skb2) < need_tailroom) {
- int n = 0;
- if (inter_headroom > skb_headroom(skb2))
- n = inter_headroom - skb_headroom(skb2);
- if (pskb_expand_head(skb2, n,
- need_tailroom - skb_tailroom(skb2), GFP_ATOMIC)) {
- ieee80211_dev_kfree_skb(&skb2);
+ if (pskb_expand_head(skb2, inter_headroom,
+ need_tailroom, GFP_ATOMIC)) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: cannot expand storage (tail2)\n",
- __func__);
- vap->iv_stats.is_tx_nobuf++;
- /* this shouldn't happen, but don't send first ff either */
- ieee80211_dev_kfree_skb(&skb);
+ "%s: cannot expand storage (tail)\n", __func__);
+ goto error;
}
- } else if (skb_headroom(skb2) < inter_headroom) {
- struct sk_buff *tmp = skb2;
-
- skb2 = skb_realloc_headroom(skb2, inter_headroom);
- if (skb2 == NULL) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: cannot expand storage (head2)\n",
- __func__);
- vap->iv_stats.is_tx_nobuf++;
- /* this shouldn't happen, but don't send first ff either */
- ieee80211_dev_kfree_skb(&skb);
- skb = NULL;
- } else
- ieee80211_skb_copy_noderef(tmp, skb);
- ieee80211_dev_kfree_skb(&tmp);
- }
- if (skb) {
- skb->next = skb2;
}
- return skb;
+ skb->next = skb2;
}
#endif /* ATH_SUPERG_FF */
- if (skb == NULL) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: cannot unshare for encapsulation\n", __func__);
- vap->iv_stats.is_tx_nobuf++;
- } else if (skb_tailroom(skb) < need_tailroom) {
- int n = 0;
- if (need_headroom > skb_headroom(skb))
- n = need_headroom - skb_headroom(skb);
- if (pskb_expand_head(skb, n, need_tailroom -
- skb_tailroom(skb), GFP_ATOMIC)) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: cannot expand storage (tail)\n", __func__);
- vap->iv_stats.is_tx_nobuf++;
- ieee80211_dev_kfree_skb(&skb);
- }
- } else if (skb_headroom(skb) < need_headroom) {
- struct sk_buff *tmp = skb;
- skb = skb_realloc_headroom(skb, need_headroom);
- /* Increment reference count after copy */
- if (skb == NULL) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: cannot expand storage (head)\n", __func__);
- vap->iv_stats.is_tx_nobuf++;
- } else
- ieee80211_skb_copy_noderef(tmp, skb);
- ieee80211_dev_kfree_skb(&tmp);
- }
return skb;
+
+error:
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_dev_kfree_skb(&skb);
+#ifdef ATH_SUPERG_FF
+ if (skb2)
+ ieee80211_dev_kfree_skb(&skb2);
+#endif
+ return NULL;
}
#define KEY_UNDEFINED(k) ((k).wk_cipher == &ieee80211_cipher_none)
--- a/net80211/ieee80211_input.c
+++ b/net80211/ieee80211_input.c
@@ -204,7 +204,6 @@ ieee80211_input(struct ieee80211vap * va
struct ieee80211_frame *wh;
struct ieee80211_key *key;
struct ether_header *eh;
- struct sk_buff *skb2;
#ifdef ATH_SUPERG_FF
struct llc *llc;
#endif
@@ -244,20 +243,6 @@ ieee80211_input(struct ieee80211vap * va
vap->iv_stats.is_rx_tooshort++;
goto out;
}
- /* Clone the SKB... we assume somewhere in this driver that we 'own'
- * the skbuff passed into hard start and we do a lot of messing with it
- * but bridges under some cases will not clone for the first pass of skb
- * to a bridge port, but will then clone for subsequent ones. This is
- * odd behavior but it means that if we have trashed the skb we are given
- * then other ports get clones of the residual garbage.
- */
- if ((skb2 = skb_copy(skb, GFP_ATOMIC)) == NULL) {
- vap->iv_devstats.tx_dropped++;
- goto out;
- }
- ieee80211_skb_copy_noderef(skb, skb2);
- ieee80211_dev_kfree_skb(&skb);
- skb = skb2;
/*
* Bit of a cheat here, we use a pointer for a 3-address
@@ -738,7 +723,7 @@ ieee80211_input(struct ieee80211vap * va
/* ether_type must be length as FF frames are always LLC/SNAP encap'd */
frame_len = ntohs(eh_tmp->ether_type);
- skb1 = skb_copy(skb, GFP_ATOMIC);
+ skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1 == NULL)
goto err;
ieee80211_skb_copy_noderef(skb, skb1);
@@ -1137,7 +1122,7 @@ ieee80211_deliver_data(struct ieee80211_
if (ETHER_IS_MULTICAST(eh->ether_dhost) && !netif_queue_stopped(dev)) {
/* Create a SKB for the BSS to send out. */
- skb1 = skb_copy(skb, GFP_ATOMIC);
+ skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
SKB_CB(skb1)->ni = ieee80211_ref_node(vap->iv_bss);
}
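
The core of this performance patch is avoiding full payload copies on the hot TX/RX paths: skb_copy() calls become skb_clone() where the data is never modified (skb_clone() duplicates only the sk_buff header and shares the data buffer), and the unconditional copy/realloc logic in ieee80211_skbhdr_adjust() is replaced by a single check that expands the head in place only when the buffer is cloned or genuinely short on room. The sketch below is not part of the patch; it is a minimal, standalone illustration of that headroom/tailroom strategy. The function name prep_for_80211_encap and the literal 64/8 byte figures are illustrative assumptions, not values from the driver.

#include <linux/skbuff.h>
#include <linux/gfp.h>

/*
 * Sketch of the strategy used by the patched ieee80211_skbhdr_adjust():
 * compute how much head/tail room is actually missing, clamp at zero,
 * and reallocate the skb head in place only when needed.
 */
static struct sk_buff *prep_for_80211_encap(struct sk_buff *skb)
{
	int need_headroom = 64;	/* assumed 802.11 + LLC/SNAP overhead */
	int need_tailroom = 8;	/* assumed crypto MIC space */

	/* Only ask for what is missing; clamp at zero if there is enough. */
	need_headroom -= skb_headroom(skb);
	if (need_headroom < 0)
		need_headroom = 0;
	need_tailroom -= skb_tailroom(skb);
	if (need_tailroom < 0)
		need_tailroom = 0;

	/* Reallocate only if the data is shared (cloned) or room is short;
	 * pskb_expand_head() also unshares cloned data as a side effect. */
	if (skb_cloned(skb) || need_headroom > 0 || need_tailroom > 0) {
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			return NULL;	/* no memory: drop, do not requeue */
		}
	}
	return skb;
}

Compared with the removed code paths (skb_copy() plus skb_realloc_headroom() on every shared or tight buffer), this touches the packet data at most once and usually not at all, which is where the throughput gain comes from.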