ramips: raeth: add tx_info structure

This helps to reduce the code size on the TX path:

add/remove: 0/0 grow/shrink: 1/4 up/down: 56/-92 (-36)
function                                     old     new   delta
ramips_eth_open                             1048    1104     +56
ramips_eth_tx_housekeeping                   156     148      -8
ramips_ring_free                             224     212     -12
ramips_eth_stop                              420     400     -20
ramips_eth_hard_start_xmit                   480     428     -52

SVN-Revision: 30731
Author: Gabor Juhos
Date: 2012-02-26 10:54:10 +00:00
parent 548155ab7d
commit 32e7b1c7d9
2 changed files with 55 additions and 19 deletions
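
The size win comes largely from the TX paths no longer indexing two parallel arrays (re->tx[] for the descriptor and re->tx_skb[] for its skb): each slot is now a single raeth_tx_info entry that carries both pointers, so one indexed lookup replaces two. A minimal sketch of the resulting access pattern, reusing the names introduced by the diff below; the helper name and the idx parameter are illustrative only and not part of the commit:

/* Hypothetical helper (not in the commit): true when TX slot 'idx' has
 * been completed by the hardware and still holds an skb to reclaim. */
static int raeth_tx_slot_done(struct raeth_priv *re, unsigned int idx)
{
	struct raeth_tx_info *txi = &re->tx_info[idx];

	/* a single lookup yields both the DMA descriptor and its skb */
	return (txi->tx_desc->txd2 & TX_DMA_DONE) && txi->tx_skb != NULL;
}

This is essentially the condition ramips_eth_tx_housekeeping() tests after the change, expressed against the new structure.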


@@ -213,6 +213,11 @@ struct ramips_tx_dma {
 	unsigned int txd4;
 } __packed __aligned(4);
 
+struct raeth_tx_info {
+	struct ramips_tx_dma *tx_desc;
+	struct sk_buff *tx_skb;
+};
+
 struct raeth_int_stats {
 	unsigned long rx_delayed;
 	unsigned long tx_delayed;
@@ -247,10 +252,10 @@ struct raeth_priv
 	struct sk_buff *rx_skb[NUM_RX_DESC];
 	dma_addr_t rx_dma[NUM_RX_DESC];
 
+	struct raeth_tx_info *tx_info;
 	dma_addr_t tx_desc_dma;
 	struct tasklet_struct tx_housekeeping_tasklet;
 	struct ramips_tx_dma *tx;
-	struct sk_buff *tx_skb[NUM_TX_DESC];
 
 	unsigned int skb_free_idx;


@@ -108,20 +108,25 @@ ramips_ring_setup(struct raeth_priv *re)
 	int len;
 	int i;
 
+	memset(re->tx_info, 0, NUM_TX_DESC * sizeof(struct raeth_tx_info));
+
 	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
 	memset(re->tx, 0, len);
 	for (i = 0; i < NUM_TX_DESC; i++) {
+		struct raeth_tx_info *txi;
 		struct ramips_tx_dma *txd;
 
 		txd = &re->tx[i];
 		txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
 		txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;
 
-		if (re->tx_skb[i] != NULL) {
+		txi = &re->tx_info[i];
+		txi->tx_desc = txd;
+		if (txi->tx_skb != NULL) {
 			netdev_warn(re->netdev,
 				    "dirty skb for TX desc %d\n", i);
-			re->tx_skb[i] = NULL;
+			txi->tx_skb = NULL;
 		}
 	}
@@ -153,10 +158,14 @@ ramips_ring_cleanup(struct raeth_priv *re)
 			dma_unmap_single(&re->netdev->dev, re->rx_dma[i],
 					 MAX_RX_LENGTH, DMA_FROM_DEVICE);
 
-	for (i = 0; i < NUM_TX_DESC; i++)
-		if (re->tx_skb[i]) {
-			dev_kfree_skb_any(re->tx_skb[i]);
-			re->tx_skb[i] = NULL;
+	for (i = 0; i < NUM_TX_DESC; i++) {
+		struct raeth_tx_info *txi;
+
+		txi = &re->tx_info[i];
+		if (txi->tx_skb) {
+			dev_kfree_skb_any(txi->tx_skb);
+			txi->tx_skb = NULL;
 		}
+	}
 }
@@ -559,6 +568,8 @@ ramips_ring_free(struct raeth_priv *re)
 		dma_free_coherent(&re->netdev->dev, len, re->tx,
 				  re->tx_desc_dma);
 	}
+
+	kfree(re->tx_info);
 }
 
 static int
@@ -568,6 +579,11 @@ ramips_ring_alloc(struct raeth_priv *re)
 	int err = -ENOMEM;
 	int i;
 
+	re->tx_info = kzalloc(NUM_TX_DESC * sizeof(struct raeth_tx_info),
+			      GFP_ATOMIC);
+	if (!re->tx_info)
+		goto err_cleanup;
+
 	/* allocate tx ring */
 	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
 	re->tx = dma_alloc_coherent(&re->netdev->dev, len,
@@ -617,6 +633,8 @@ static int
 ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct raeth_priv *re = netdev_priv(dev);
+	struct raeth_tx_info *txi, *txi_next;
+	struct ramips_tx_dma *txd, *txd_next;
 	unsigned long tx;
 	unsigned int tx_next;
 	dma_addr_t mapped_addr;
@@ -641,18 +659,23 @@ ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
 	tx_next = (tx + 1) % NUM_TX_DESC;
 
-	if ((re->tx_skb[tx]) || (re->tx_skb[tx_next]) ||
-	    !(re->tx[tx].txd2 & TX_DMA_DONE) ||
-	    !(re->tx[tx_next].txd2 & TX_DMA_DONE))
+	txi = &re->tx_info[tx];
+	txd = txi->tx_desc;
+	txi_next = &re->tx_info[tx_next];
+	txd_next = txi_next->tx_desc;
+
+	if ((txi->tx_skb) || (txi_next->tx_skb) ||
+	    !(txd->txd2 & TX_DMA_DONE) ||
+	    !(txd_next->txd2 & TX_DMA_DONE))
 		goto out;
 
-	re->tx[tx].txd1 = (unsigned int) mapped_addr;
-	re->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
-	re->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
+	txi->tx_skb = skb;
+	txd->txd1 = (unsigned int) mapped_addr;
+	wmb();
+	txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
-	re->tx_skb[tx] = skb;
 	wmb();
 	ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
 	spin_unlock(&re->page_lock);
 	return NETDEV_TX_OK;
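
Worth noting in the rewritten fast path above: the skb pointer and the buffer address (txd1) are stored first, a wmb() is issued, and only then is txd2 updated, while the pre-existing wmb() still separates the descriptor stores from the RAMIPS_TX_CTX_IDX0 register write. A generic sketch of this store/barrier/store publish pattern; the struct and helper names are illustrative and not taken from the driver:

/* Illustrative only: just the ordering (payload stores, write barrier,
 * control word) mirrors the new xmit path above.
 * wmb() is the kernel write memory barrier. */
struct pub_desc {
	unsigned int addr;	/* buffer address, written while the CPU owns the slot */
	unsigned int ctrl;	/* flags/length word that hands the slot to the consumer */
};

static void pub_desc_publish(struct pub_desc *d, unsigned int buf, unsigned int ctrl)
{
	d->addr = buf;	/* 1: payload fields */
	wmb();		/* 2: make the payload visible before the control word */
	d->ctrl = ctrl;	/* 3: the consumer may now process the descriptor */
}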
@@ -730,10 +753,18 @@ ramips_eth_tx_housekeeping(unsigned long ptr)
 	struct raeth_priv *re = netdev_priv(dev);
 
 	spin_lock(&re->page_lock);
-	while ((re->tx[re->skb_free_idx].txd2 & TX_DMA_DONE) &&
-	       (re->tx_skb[re->skb_free_idx])) {
-		dev_kfree_skb_irq(re->tx_skb[re->skb_free_idx]);
-		re->tx_skb[re->skb_free_idx] = 0;
+	while (1) {
+		struct raeth_tx_info *txi;
+		struct ramips_tx_dma *txd;
+
+		txi = &re->tx_info[re->skb_free_idx];
+		txd = txi->tx_desc;
+
+		if (!(txd->txd2 & TX_DMA_DONE) || !(txi->tx_skb))
+			break;
+
+		dev_kfree_skb_irq(txi->tx_skb);
+		txi->tx_skb = NULL;
 		re->skb_free_idx++;
 		if (re->skb_free_idx >= NUM_TX_DESC)
 			re->skb_free_idx = 0;