...

Code Block

  struct stm32_ethmac_s
  {
    ...

    /* Descriptor allocations */

    struct eth_rxdesc_s rxtable[CONFIG_STM32_ETH_NRXDESC];
    struct eth_txdesc_s txtable[CONFIG_STM32_ETH_NTXDESC];

    /* Buffer allocations */

    uint8_t rxbuffer[CONFIG_STM32_ETH_NRXDESC*CONFIG_STM32_ETH_BUFSIZE];
    uint8_t alloc[STM32_ETH_NFREEBUFFERS*CONFIG_STM32_ETH_BUFSIZE];
  };

...

The following definitions were added to support aligning the sizes of the buffers to the Cortex-M7 D-Cache line size:

Code Block

  /* Buffers used for DMA access must begin on an address aligned with the
   * D-Cache line and must be an even multiple of the D-Cache line size.
   * These size/alignment requirements are necessary so that D-Cache flush
   * and invalidate operations will not have any additional effects.
   *
   * The TX and RX descriptors are normally 16 bytes in size but could be
   * 32 bytes in size if the enhanced descriptor format is used (it is not).
   */

  #define DMA_BUFFER_MASK    (ARMV7M_DCACHE_LINESIZE - 1)
  #define DMA_ALIGN_UP(n)    (((n) + DMA_BUFFER_MASK) & ~DMA_BUFFER_MASK)
  #define DMA_ALIGN_DOWN(n)  ((n) & ~DMA_BUFFER_MASK)

  #ifndef CONFIG_STM32F7_ETH_ENHANCEDDESC
  #  define RXDESC_SIZE       16
  #  define TXDESC_SIZE       16
  #else
  #  define RXDESC_SIZE       32
  #  define TXDESC_SIZE       32
  #endif

  #define RXDESC_PADSIZE      DMA_ALIGN_UP(RXDESC_SIZE)
  #define TXDESC_PADSIZE      DMA_ALIGN_UP(TXDESC_SIZE)
  #define ALIGNED_BUFSIZE     DMA_ALIGN_UP(ETH_BUFSIZE)

  #define RXTABLE_SIZE        (STM32F7_NETHERNET * CONFIG_STM32F7_ETH_NRXDESC)
  #define TXTABLE_SIZE        (STM32F7_NETHERNET * CONFIG_STM32F7_ETH_NTXDESC)

  #define RXBUFFER_SIZE       (CONFIG_STM32F7_ETH_NRXDESC * ALIGNED_BUFSIZE)
  #define RXBUFFER_ALLOC      (STM32F7_NETHERNET * RXBUFFER_SIZE)

  #define TXBUFFER_SIZE       (STM32F7_ETH_NFREEBUFFERS * ALIGNED_BUFSIZE)
  #define TXBUFFER_ALLOC      (STM32F7_NETHERNET * TXBUFFER_SIZE)
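
To make the arithmetic concrete, here is an illustrative sketch (not part of the driver) of what these macros evaluate to when the Cortex-M7 D-Cache line size is 32 bytes:

Code Block

  /* Illustrative only -- assumes ARMV7M_DCACHE_LINESIZE == 32 (0x20) */

  #include <stdio.h>

  #define ARMV7M_DCACHE_LINESIZE 32
  #define DMA_BUFFER_MASK        (ARMV7M_DCACHE_LINESIZE - 1)  /* 0x1f */
  #define DMA_ALIGN_UP(n)        (((n) + DMA_BUFFER_MASK) & ~DMA_BUFFER_MASK)
  #define DMA_ALIGN_DOWN(n)      ((n) & ~DMA_BUFFER_MASK)

  int main(void)
  {
    printf("%d %d %d %d\n",
           DMA_ALIGN_UP(16),    /* 32: a 16-byte descriptor pads to one line */
           DMA_ALIGN_UP(32),    /* 32: already a line multiple, unchanged    */
           DMA_ALIGN_UP(33),    /* 64: rounds up to the next line boundary   */
           DMA_ALIGN_DOWN(33)); /* 32: rounds down to the previous boundary  */
    return 0;
  }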

...

The RX and TX descriptor types are replaced with union types that assure that the allocations will be aligned in size:

Code Block

  /* These union types force the allocated size of RX and TX descriptors to
   * be padded to an exact multiple of the Cortex-M7 D-Cache line size.
   */

  union stm32_txdesc_u
  {
    uint8_t             pad[TXDESC_PADSIZE];
    struct eth_txdesc_s txdesc;
  };

  union stm32_rxdesc_u
  {
    uint8_t             pad[RXDESC_PADSIZE];
    struct eth_rxdesc_s rxdesc;
  };
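
The union trick works because the size of a union is that of its largest member, so each array element occupies RXDESC_PADSIZE or TXDESC_PADSIZE bytes; once the array itself is placed on a D-Cache line boundary, no descriptor shares a cache line with its neighbor. A minimal sketch of compile-time checks that would confirm this (not part of the driver; assumes a C11 static_assert is available):

Code Block

  #include <assert.h>

  /* Hypothetical checks, for illustration only */

  static_assert(sizeof(union stm32_rxdesc_u) == RXDESC_PADSIZE,
                "RX descriptor union is not padded to RXDESC_PADSIZE");
  static_assert(sizeof(union stm32_txdesc_u) == TXDESC_PADSIZE,
                "TX descriptor union is not padded to TXDESC_PADSIZE");
  static_assert((RXDESC_PADSIZE % ARMV7M_DCACHE_LINESIZE) == 0,
                "Padded descriptor size is not a D-Cache line multiple");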


Then, finally, the new descriptor tables and buffers are defined by the following globals:

Code Block

  /* DMA buffers.  DMA buffers must:
   *
   * 1. Be a multiple of the D-Cache line size.  This requirement is assured
   *    by the definition of RXDMA buffer size above.
   * 2. Be aligned to D-Cache line boundaries, and
   * 3. Be positioned in DMA-able memory (*NOT* DTCM memory).  This must
   *    be managed by logic in the linker script file.
   *
   * These DMA buffers are defined sequentially here to best assure optimal
   * packing of the buffers.
   */

  /* Descriptor allocations */

  static union stm32_rxdesc_u g_rxtable[RXTABLE_SIZE]
    __attribute__((aligned(ARMV7M_DCACHE_LINESIZE)));
  static union stm32_txdesc_u g_txtable[TXTABLE_SIZE]
    __attribute__((aligned(ARMV7M_DCACHE_LINESIZE)));

  /* Buffer allocations */

  static uint8_t g_rxbuffer[RXBUFFER_ALLOC]
    __attribute__((aligned(ARMV7M_DCACHE_LINESIZE)));
  static uint8_t g_txbuffer[TXBUFFER_ALLOC]
    __attribute__((aligned(ARMV7M_DCACHE_LINESIZE)));
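
Since each pool is sized for STM32F7_NETHERNET interfaces, each interface works with its own slice of these arrays. The following sketch is hypothetical (the helper names and the intf parameter are illustrative, not the driver's actual code) and only shows how such a slice could be computed; the slice offsets remain D-Cache line aligned because RXDESC_PADSIZE and ALIGNED_BUFSIZE are line-size multiples:

Code Block

  /* Hypothetical helpers, for illustration only: return the start of the RX
   * descriptor table and RX buffer pool belonging to interface 'intf'.
   */

  static union stm32_rxdesc_u *stm32_rxtable(int intf)
  {
    return &g_rxtable[intf * CONFIG_STM32F7_ETH_NRXDESC];
  }

  static uint8_t *stm32_rxbuffer(int intf)
  {
    return &g_rxbuffer[intf * RXBUFFER_SIZE];
  }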

...

Here is an example where the RX descriptors are invalidated:

Code Block

  static int stm32_recvframe(struct stm32_ethmac_s *priv)
  {
  ...
    /* Scan descriptors owned by the CPU */

    rxdesc = priv->rxhead;

    /* Forces the first RX descriptor to be re-read from physical memory */

    arch_invalidate_dcache((uintptr_t)rxdesc,
                           (uintptr_t)rxdesc + sizeof(struct eth_rxdesc_s));

    for (i = 0;
         (rxdesc->rdes0 & ETH_RDES0_OWN) == 0 &&
          i < CONFIG_STM32F7_ETH_NRXDESC &&
          priv->inflight < CONFIG_STM32F7_ETH_NTXDESC;
         i++)
      {
      ...
        /* Try the next descriptor */

        rxdesc = (struct eth_rxdesc_s *)rxdesc->rdes3;

        /* Force the next RX descriptor to be re-read from physical memory */

        arch_invalidate_dcache((uintptr_t)rxdesc,
                               (uintptr_t)rxdesc + sizeof(struct eth_rxdesc_s));
      }
  ...
  }


Here is an example where a TX descriptor is cleaned:

Code Block

  static int stm32_transmit(struct stm32_ethmac_s *priv)
  {
  ...
          /* Give the descriptor to DMA */

          txdesc->tdes0 |= ETH_TDES0_OWN;

          /* Flush the contents of the modified TX descriptor into physical
           * memory.
           */

          arch_clean_dcache((uintptr_t)txdesc,
                            (uintptr_t)txdesc + sizeof(struct eth_txdesc_s));
  ...
  }


Here is where the read buffer is invalidated just after a read DMA completes:

Code Block

  static int stm32_recvframe(struct stm32_ethmac_s *priv)
  {
  ...
                ...
                /* Force the completed RX DMA buffer to be re-read from
                 * physical memory.
                 */

                arch_invalidate_dcache((uintptr_t)dev->d_buf,
                                       (uintptr_t)dev->d_buf + dev->d_len);

                nllvdbg("rxhead: %p d_buf: %p d_len: %d\n",
                        priv->rxhead, dev->d_buf, dev->d_len);

                /* Return success */

                return OK;
  ...
  }


Here is where the write buffer is cleaned prior to starting a write DMA:

Code Block

  static int stm32_transmit(struct stm32_ethmac_s *priv)
  {
  ...
    /* Flush the contents of the TX buffer into physical memory */

    arch_clean_dcache((uintptr_t)priv->dev.d_buf,
                      (uintptr_t)priv->dev.d_buf + priv->dev.d_len);
  ...
  }