cns3xxx: ethernet - revert: clean up tx descs only when needed

This reverts commit 0772ab938c0aedd7f4cc7127059d6ce8cf929dfa.

Trying to optimize calls to eth_complete_tx in this fashion causes a regression
where, on a transmit-only workload, the tx queue can get disabled until a packet
is received. The original call to eth_schedule_poll() only schedules deferred
work, so it should not cause a performance issue.

Signed-off-by: Tim Harvey <tharvey@gateworks.com>

SVN-Revision: 40592
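
For context, this is the transmit-path logic being reverted, reconstructed from
the removed lines of the last hunk below (a sketch of the excerpt, not the full
eth_xmit()). In this variant eth_schedule_poll() is no longer called on every
transmit, so freeing descriptors depends on the inline eth_complete_tx() call
once the ring is almost full; as described above, on a send-only workload that
can leave the tx queue stopped until a received packet causes the poll to run.

	/* Variant removed by this revert: eth_complete_tx() returned the
	 * number of free descriptors and was only invoked once the ring
	 * was nearly full.
	 */
	spin_lock_bh(&tx_lock);
	if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
		/* clean up tx descriptors when needed */
		if (eth_complete_tx(sw) < nr_desc) {
			spin_unlock_bh(&tx_lock);
			return NETDEV_TX_BUSY;
		}
	}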
Felix Fietkau 2014-04-29 15:52:12 +00:00
parent 5438bc68b5
commit 2603c9045d

@@ -583,7 +583,7 @@ static void eth_check_num_used(struct _tx_ring *tx_ring)
 	}
 }
 
-static int eth_complete_tx(struct sw *sw)
+static void eth_complete_tx(struct sw *sw)
 {
 	struct _tx_ring *tx_ring = &sw->tx_ring;
 	struct tx_desc *desc;
@@ -615,8 +615,6 @@ static int eth_complete_tx(struct sw *sw)
 	tx_ring->free_index = index;
 	tx_ring->num_used -= i;
 	eth_check_num_used(tx_ring);
-
-	return TX_DESCS - tx_ring->num_used;
 }
 
 static int eth_poll(struct napi_struct *napi, int budget)
@@ -778,13 +776,11 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_walk_frags(skb, skb1)
 		nr_desc++;
 
+	eth_schedule_poll(sw);
 	spin_lock_bh(&tx_lock);
 	if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
-		/* clean up tx descriptors when needed */
-		if (eth_complete_tx(sw) < nr_desc) {
-			spin_unlock_bh(&tx_lock);
-			return NETDEV_TX_BUSY;
-		}
+		spin_unlock_bh(&tx_lock);
+		return NETDEV_TX_BUSY;
 	}
 
 	index = index0 = tx_ring->cur_index;
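
For comparison, the same path as it reads after this revert, assembled from the
added and context lines of the hunk above (again an excerpt, not the complete
function):

	/* Restored by this revert: the NAPI poll is scheduled unconditionally
	 * before the occupancy check, so completed tx descriptors get
	 * reclaimed from the poll path even when no packets are received.
	 */
	eth_schedule_poll(sw);
	spin_lock_bh(&tx_lock);
	if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
		spin_unlock_bh(&tx_lock);
		return NETDEV_TX_BUSY;
	}

	index = index0 = tx_ring->cur_index;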