openwrtv4/target/linux/mediatek/patches-4.4/0081-net-next-mediatek-fix-DQL-support.patch

From 81cdbda2a08375b9d5915567d2210bf2433e7332 Mon Sep 17 00:00:00 2001
From: John Crispin <john@phrozen.org>
Date: Sat, 23 Apr 2016 11:57:21 +0200
Subject: [PATCH 081/102] net-next: mediatek: fix DQL support

The MTK ethernet core has two MACs, both sitting on the same DMA ring. The
current code assigns the TX traffic of each MAC to its own DQL. As data from
multiple devices is in fact enqueued on the same ring, the amount of data
that DQL reports as queued is incorrect, which makes any decision based on
these values non-deterministic. Fix this by tracking all TX traffic,
regardless of which MAC it belongs to, in the DQL of every device using the
DMA ring.

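To illustrate the accounting scheme, here is a minimal userspace sketch. It
is illustration only: the fake_dev struct and the sent_queue_all() /
completed_queue_all() helpers are invented for the example and are not part
of the driver or of the kernel BQL/DQL API.

    #include <stdio.h>

    #define MAC_COUNT 2

    /* stand-in for the per-netdev byte-limit (DQL) state */
    struct fake_dev {
            const char *name;
            unsigned long inflight_bytes;
    };

    static struct fake_dev devs[MAC_COUNT] = {
            { "mac0", 0 },
            { "mac1", 0 },
    };

    /* on transmit, charge the frame length to every device sharing the ring */
    static void sent_queue_all(unsigned int len)
    {
            for (int i = 0; i < MAC_COUNT; i++)
                    devs[i].inflight_bytes += len;
    }

    /* on completion, credit the bytes back to every device sharing the ring */
    static void completed_queue_all(unsigned int len)
    {
            for (int i = 0; i < MAC_COUNT; i++)
                    devs[i].inflight_bytes -= len;
    }

    int main(void)
    {
            sent_queue_all(1500);           /* frame queued via MAC 0 */
            sent_queue_all(600);            /* frame queued via MAC 1 */
            completed_queue_all(1500);      /* first frame completed by hardware */

            for (int i = 0; i < MAC_COUNT; i++)
                    printf("%s: %lu bytes in flight\n",
                           devs[i].name, devs[i].inflight_bytes);
            return 0;
    }

Both counters end up identical, because the ring, not the individual MAC, is
the shared resource whose occupancy DQL has to model.
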
Signed-off-by: John Crispin <john@phrozen.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 33 ++++++++++++++++-----------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 76ecb1b..feedd5a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -656,7 +656,16 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
(!nr_frags * TX_DMA_LS0)));
- netdev_sent_queue(dev, skb->len);
+ /* we have a single DMA ring so BQL needs to be updated for all devices
+ * sitting on this ring
+ */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ netdev_sent_queue(eth->netdev[i], skb->len);
+ }
+
skb_tx_timestamp(skb);
ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
@@ -884,21 +893,18 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
struct mtk_tx_dma *desc;
struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
- int total = 0, done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
+ int total = 0, done = 0;
+ unsigned int bytes = 0;
u32 cpu, dma;
static int condition;
int i;
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
-
cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
desc = mtk_qdma_phys_to_virt(ring, cpu);
- while ((cpu != dma) && budget) {
+ while ((cpu != dma) && done < budget) {
u32 next_cpu = desc->txd2;
int mac;
@@ -918,9 +924,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
}
if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
- budget--;
+ bytes += skb->len;
+ done++;
}
mtk_tx_unmap(eth->dev, tx_buf);
@@ -933,11 +938,13 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ /* we have a single DMA ring so BQL needs to be updated for all devices
+ * sitting on this ring
+ */
for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] || !done[i])
+ if (!eth->netdev[i])
continue;
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
- total += done[i];
+ netdev_completed_queue(eth->netdev[i], done, bytes);
}
/* read hw index again make sure no new tx packet */
--
1.7.10.4