f687ab2101
This is an important patch for new devices that support unaligned addressing. These devices suffer from a backward-compatibility bug in the DMA engine. In theory we should be able to use the old mechanism, but in practice the DMA address seems to be randomly copied into the status register when the hardware reaches the end of a ring. This breaks reading the slot number from the status register, so we can't use DMA anymore. Signed-off-by: Rafał Miłecki <zajec5@gmail.com> SVN-Revision: 38004
140 lines
5.2 KiB
Diff
bgmac: implement unaligned addressing for DMA rings that support it

This is an important patch for new devices that support unaligned
addressing. These devices suffer from a backward-compatibility bug in
the DMA engine. In theory we should be able to use the old mechanism,
but in practice the DMA address seems to be randomly copied into the
status register when the hardware reaches the end of a ring. This
breaks reading the slot number from the status register, so we can't
use DMA anymore.

Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
 	if (++ring->end >= BGMAC_TX_RING_SLOTS)
 		ring->end = 0;
 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+		    ring->index_base +
 		    ring->end * sizeof(struct bgmac_dma_desc));
 
 	/* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgm
 	/* The last slot that hardware didn't consume yet */
 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
 	empty_slot &= BGMAC_DMA_TX_STATDPTR;
+	empty_slot -= ring->index_base;
+	empty_slot &= BGMAC_DMA_TX_STATDPTR;
 	empty_slot /= sizeof(struct bgmac_dma_desc);
 
 	while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgma
 
 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
 	end_slot &= BGMAC_DMA_RX_STATDPTR;
+	end_slot -= ring->index_base;
+	end_slot &= BGMAC_DMA_RX_STATDPTR;
 	end_slot /= sizeof(struct bgmac_dma_desc);
 
 	ring->end = end_slot;
@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac
 		ring = &bgmac->tx_ring[i];
 		ring->num_slots = BGMAC_TX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
-			bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_TX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* No need to alloc TX slots yet */
 	}
 
@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac
 		ring = &bgmac->rx_ring[i];
 		ring->num_slots = BGMAC_RX_RING_SLOTS;
 		ring->mmio_base = ring_base[i];
-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
-			bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-				   ring->mmio_base);
 
 		/* Alloc ring of descriptors */
 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac
 		if (ring->dma_base & 0xC0000000)
 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+						      BGMAC_DMA_RING_RX);
+		if (ring->unaligned)
+			ring->index_base = lower_32_bits(ring->dma_base);
+		else
+			ring->index_base = 0;
+
 		/* Alloc RX slots */
 		for (j = 0; j < ring->num_slots; j++) {
 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac
 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
 		ring = &bgmac->tx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_tx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_tx_enable(bgmac, ring);
 
 		ring->start = 0;
 		ring->end = 0;	/* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac
 
 		ring = &bgmac->rx_ring[i];
 
-		/* We don't implement unaligned addressing, so enable first */
-		bgmac_dma_rx_enable(bgmac, ring);
+		if (!ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
 			    lower_32_bits(ring->dma_base));
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
 			    upper_32_bits(ring->dma_base));
+		if (ring->unaligned)
+			bgmac_dma_rx_enable(bgmac, ring);
 
 		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
 		     j++, dma_desc++) {
@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac
 		}
 
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+			    ring->index_base +
 			    ring->num_slots * sizeof(struct bgmac_dma_desc));
 
 		ring->start = 0;
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
 	u16 mmio_base;
 	struct bgmac_dma_desc *cpu_base;
 	dma_addr_t dma_base;
+	u32 index_base;			/* Used for unaligned rings only, otherwise 0 */
+	bool unaligned;
 
 	struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
 };