ramips: raeth: separate ring allocation and setup

SVN-Revision: 30573
Author: Gabor Juhos
Date: 2012-02-16 08:17:50 +00:00
Parent: d186c17aa6
Commit: f8a56dc8df

View file

@ -102,6 +102,64 @@ ramips_alloc_skb(struct raeth_priv *re)
return skb; return skb;
} }
/*
 * ramips_ring_setup - program the TX and RX DMA descriptor rings.
 *
 * Resets both rings to their initial state.  The descriptor memory and
 * the RX skbs must already have been allocated by the ring-alloc path;
 * this function only (re)writes descriptor fields and maps the RX
 * buffers for DMA.  Safe to call again to re-arm the rings.
 */
static void
ramips_ring_setup(struct raeth_priv *re)
{
	int len;
	int i;

	/* Clear the TX ring and mark every descriptor as done/free. */
	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
	memset(re->tx, 0, len);
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct ramips_tx_dma *txd;

		txd = &re->tx[i];
		txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
		txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;

		/*
		 * A leftover skb pointer means a previous run did not
		 * clean the TX ring up properly.  Forget the stale
		 * pointer so the slot is reusable.
		 * NOTE(review): the skb is not freed here — presumably
		 * the cleanup path already released it; confirm, else
		 * this leaks.
		 */
		if (re->tx_skb[i] != NULL) {
			netdev_warn(re->netdev,
				    "dirty skb for TX desc %d\n", i);
			re->tx_skb[i] = NULL;
		}
	}

	/* Clear the RX ring and attach each preallocated RX skb. */
	len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
	memset(re->rx, 0, len);
	for (i = 0; i < NUM_RX_DESC; i++) {
		dma_addr_t dma_addr;

		/* RX skbs are allocated beforehand by the alloc path. */
		BUG_ON(re->rx_skb[i] == NULL);

		/* Map the buffer and hand its bus address to the NIC. */
		dma_addr = dma_map_single(&re->netdev->dev, re->rx_skb[i]->data,
					  MAX_RX_LENGTH, DMA_FROM_DEVICE);
		re->rx_dma[i] = dma_addr;
		re->rx[i].rxd1 = (unsigned int) dma_addr;
		re->rx[i].rxd2 = RX_DMA_LSO;
	}

	/* flush descriptors */
	wmb();
}
/*
 * ramips_ring_cleanup - detach buffers from the DMA rings.
 *
 * Undoes what ramips_ring_setup() armed: removes the DMA mappings of
 * the RX buffers and frees any skbs still sitting on the TX ring.
 * The RX skbs themselves and the descriptor memory are released by the
 * ring-free path, not here.
 */
static void
ramips_ring_cleanup(struct raeth_priv *re)
{
	int desc;

	/* Tear down the streaming DMA mapping of each attached RX buffer. */
	for (desc = 0; desc < NUM_RX_DESC; desc++) {
		if (!re->rx_skb[desc])
			continue;
		dma_unmap_single(&re->netdev->dev, re->rx_dma[desc],
				 MAX_RX_LENGTH, DMA_FROM_DEVICE);
	}

	/* Release skbs of TX frames that were never completed. */
	for (desc = 0; desc < NUM_TX_DESC; desc++) {
		if (!re->tx_skb[desc])
			continue;
		dev_kfree_skb_any(re->tx_skb[desc]);
		re->tx_skb[desc] = NULL;
	}
}
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883) #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)
#define RAMIPS_MDIO_RETRY 1000 #define RAMIPS_MDIO_RETRY 1000
@ -481,77 +539,63 @@ ramips_phy_stop(struct raeth_priv *re)
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */ #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */
static void static void
ramips_cleanup_dma(struct raeth_priv *re) ramips_ring_free(struct raeth_priv *re)
{ {
int len;
int i; int i;
for (i = 0; i < NUM_RX_DESC; i++) for (i = 0; i < NUM_RX_DESC; i++)
if (re->rx_skb[i]) { if (re->rx_skb[i])
dma_unmap_single(&re->netdev->dev, re->rx_dma[i],
MAX_RX_LENGTH, DMA_FROM_DEVICE);
dev_kfree_skb_any(re->rx_skb[i]); dev_kfree_skb_any(re->rx_skb[i]);
}
if (re->rx) if (re->rx) {
dma_free_coherent(&re->netdev->dev, len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
NUM_RX_DESC * sizeof(struct ramips_rx_dma), dma_free_coherent(&re->netdev->dev, len, re->rx,
re->rx, re->rx_desc_dma); re->rx_desc_dma);
}
if (re->tx) if (re->tx) {
dma_free_coherent(&re->netdev->dev, len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
NUM_TX_DESC * sizeof(struct ramips_tx_dma), dma_free_coherent(&re->netdev->dev, len, re->tx,
re->tx, re->tx_desc_dma); re->tx_desc_dma);
}
} }
static int static int
ramips_alloc_dma(struct raeth_priv *re) ramips_ring_alloc(struct raeth_priv *re)
{ {
int len;
int err = -ENOMEM; int err = -ENOMEM;
int i; int i;
re->skb_free_idx = 0; /* allocate tx ring */
len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
/* setup tx ring */ re->tx = dma_alloc_coherent(&re->netdev->dev, len,
re->tx = dma_alloc_coherent(&re->netdev->dev, &re->tx_desc_dma, GFP_ATOMIC);
NUM_TX_DESC * sizeof(struct ramips_tx_dma),
&re->tx_desc_dma, GFP_ATOMIC);
if (!re->tx) if (!re->tx)
goto err_cleanup; goto err_cleanup;
memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma)); /* allocate rx ring */
for (i = 0; i < NUM_TX_DESC; i++) { len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
re->tx[i].txd2 = TX_DMA_LSO | TX_DMA_DONE; re->rx = dma_alloc_coherent(&re->netdev->dev, len,
re->tx[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
}
/* setup rx ring */
re->rx = dma_alloc_coherent(&re->netdev->dev,
NUM_RX_DESC * sizeof(struct ramips_rx_dma),
&re->rx_desc_dma, GFP_ATOMIC); &re->rx_desc_dma, GFP_ATOMIC);
if (!re->rx) if (!re->rx)
goto err_cleanup; goto err_cleanup;
memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
for (i = 0; i < NUM_RX_DESC; i++) { for (i = 0; i < NUM_RX_DESC; i++) {
dma_addr_t dma_addr; struct sk_buff *skb;
struct sk_buff *new_skb;
new_skb = ramips_alloc_skb(re); skb = ramips_alloc_skb(re);
if (!new_skb) if (!skb)
goto err_cleanup; goto err_cleanup;
dma_addr = dma_map_single(&re->netdev->dev, new_skb->data, re->rx_skb[i] = skb;
MAX_RX_LENGTH, DMA_FROM_DEVICE);
re->rx_dma[i] = dma_addr;
re->rx[i].rxd1 = (unsigned int) re->rx_dma[i];
re->rx[i].rxd2 |= RX_DMA_LSO;
re->rx_skb[i] = new_skb;
} }
return 0; return 0;
err_cleanup: err_cleanup:
ramips_cleanup_dma(re); ramips_ring_free(re);
return err; return err;
} }
@ -739,10 +783,11 @@ ramips_eth_open(struct net_device *dev)
if (err) if (err)
return err; return err;
err = ramips_alloc_dma(re); err = ramips_ring_alloc(re);
if (err) if (err)
goto err_free_irq; goto err_free_irq;
ramips_ring_setup(re);
ramips_hw_set_macaddr(dev->dev_addr); ramips_hw_set_macaddr(dev->dev_addr);
ramips_setup_dma(re); ramips_setup_dma(re);
@ -798,7 +843,8 @@ ramips_eth_stop(struct net_device *dev)
netif_stop_queue(dev); netif_stop_queue(dev);
tasklet_kill(&re->tx_housekeeping_tasklet); tasklet_kill(&re->tx_housekeeping_tasklet);
tasklet_kill(&re->rx_tasklet); tasklet_kill(&re->rx_tasklet);
ramips_cleanup_dma(re); ramips_ring_cleanup(re);
ramips_ring_free(re);
RADEBUG("ramips_eth: stopped\n"); RADEBUG("ramips_eth: stopped\n");
return 0; return 0;
} }