2017-06-23 13:03:51 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2017 Erwin Rol <erwin@erwinrol.com>
|
2020-06-25 15:50:42 +02:00
|
|
|
* Copyright (c) 2020 Alexander Kozhinov <AlexanderKozhinov@yandex.com>
|
2021-09-13 09:24:53 +02:00
|
|
|
* Copyright (c) 2021 Carbon Robotics
|
2017-06-23 13:03:51 +02:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
2020-07-15 13:08:06 +02:00
|
|
|
#define DT_DRV_COMPAT st_stm32_ethernet
|
|
|
|
|
2018-07-09 11:51:28 +02:00
|
|
|
#define LOG_MODULE_NAME eth_stm32_hal
|
|
|
|
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
|
|
|
|
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/logging/log.h>
|
2018-07-09 11:51:28 +02:00
|
|
|
LOG_MODULE_REGISTER(LOG_MODULE_NAME);
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/kernel.h>
|
|
|
|
#include <zephyr/device.h>
|
|
|
|
#include <zephyr/sys/__assert.h>
|
|
|
|
#include <zephyr/sys/util.h>
|
2023-01-17 06:31:15 +01:00
|
|
|
#include <zephyr/sys/crc.h>
|
2017-06-23 13:03:51 +02:00
|
|
|
#include <errno.h>
|
|
|
|
#include <stdbool.h>
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/net/net_pkt.h>
|
|
|
|
#include <zephyr/net/net_if.h>
|
|
|
|
#include <zephyr/net/ethernet.h>
|
2018-12-05 14:52:59 +01:00
|
|
|
#include <ethernet/eth_stats.h>
|
2017-06-23 13:03:51 +02:00
|
|
|
#include <soc.h>
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/sys/printk.h>
|
|
|
|
#include <zephyr/drivers/clock_control.h>
|
|
|
|
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
|
|
|
|
#include <zephyr/drivers/pinctrl.h>
|
2022-10-17 10:24:11 +02:00
|
|
|
#include <zephyr/irq.h>
|
2022-11-11 12:48:01 +01:00
|
|
|
#include <zephyr/net/lldp.h>
|
2022-12-16 15:31:24 +01:00
|
|
|
#include <zephyr/drivers/hwinfo.h>
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/drivers/ptp_clock.h>
|
2021-09-13 09:24:53 +02:00
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
|
|
|
|
2020-04-23 19:13:14 +02:00
|
|
|
#include "eth.h"
|
2017-06-23 13:03:51 +02:00
|
|
|
#include "eth_stm32_hal_priv.h"
|
|
|
|
|
2022-12-16 14:34:24 +01:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_RANDOM_MAC) || DT_INST_PROP(0, zephyr_random_mac_address)
|
|
|
|
#define ETH_STM32_RANDOM_MAC
|
|
|
|
#endif
|
|
|
|
|
2019-06-27 14:50:51 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_USE_DTCM_FOR_DMA_BUFFER) && \
|
2020-05-11 20:56:08 +02:00
|
|
|
!DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
|
2020-04-24 00:51:08 +02:00
|
|
|
#error DTCM for DMA buffer is activated but zephyr,dtcm is not present in dts
|
2019-06-27 14:50:51 +02:00
|
|
|
#endif
|
|
|
|
|
2020-06-25 15:46:00 +02:00
|
|
|
#define PHY_ADDR CONFIG_ETH_STM32_HAL_PHY_ADDRESS
|
|
|
|
|
2020-06-25 15:50:42 +02:00
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
|
|
|
|
|
|
|
#define PHY_BSR ((uint16_t)0x0001U) /*!< Transceiver Basic Status Register */
|
|
|
|
#define PHY_LINKED_STATUS ((uint16_t)0x0004U) /*!< Valid link established */
|
|
|
|
|
|
|
|
#define IS_ETH_DMATXDESC_OWN(dma_tx_desc) (dma_tx_desc->DESC3 & \
|
|
|
|
ETH_DMATXNDESCRF_OWN)
|
|
|
|
|
|
|
|
#define ETH_RXBUFNB ETH_RX_DESC_CNT
|
|
|
|
#define ETH_TXBUFNB ETH_TX_DESC_CNT
|
|
|
|
|
|
|
|
#define ETH_MEDIA_INTERFACE_MII HAL_ETH_MII_MODE
|
|
|
|
#define ETH_MEDIA_INTERFACE_RMII HAL_ETH_RMII_MODE
|
|
|
|
|
2020-07-28 10:58:06 +02:00
|
|
|
/* Only one tx_buffer is sufficient to pass only 1 dma_buffer */
|
|
|
|
#define ETH_TXBUF_DEF_NB 1U
|
2020-06-25 15:50:42 +02:00
|
|
|
#else
|
|
|
|
|
2020-06-25 15:46:00 +02:00
|
|
|
#define IS_ETH_DMATXDESC_OWN(dma_tx_desc) (dma_tx_desc->Status & \
|
|
|
|
ETH_DMATXDESC_OWN)
|
|
|
|
|
2020-06-25 15:50:42 +02:00
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
|
|
|
|
2022-07-30 18:18:46 +02:00
|
|
|
#define ETH_DMA_TX_TIMEOUT_MS 20U /* transmit timeout in milliseconds */
|
|
|
|
|
2019-06-27 14:50:51 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_USE_DTCM_FOR_DMA_BUFFER) && \
|
2020-05-11 20:56:08 +02:00
|
|
|
DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
|
2021-03-01 19:06:52 +01:00
|
|
|
#define __eth_stm32_desc __dtcm_noinit_section
|
|
|
|
#define __eth_stm32_buf __dtcm_noinit_section
|
|
|
|
#elif defined(CONFIG_SOC_SERIES_STM32H7X) && \
|
|
|
|
DT_NODE_HAS_STATUS(DT_NODELABEL(sram3), okay)
|
|
|
|
#define __eth_stm32_desc __attribute__((section(".eth_stm32_desc")))
|
|
|
|
#define __eth_stm32_buf __attribute__((section(".eth_stm32_buf")))
|
|
|
|
#elif defined(CONFIG_NOCACHE_MEMORY)
|
|
|
|
#define __eth_stm32_desc __nocache __aligned(4)
|
|
|
|
#define __eth_stm32_buf __nocache __aligned(4)
|
2019-06-27 14:50:51 +02:00
|
|
|
#else
|
2021-03-01 19:06:52 +01:00
|
|
|
#define __eth_stm32_desc __aligned(4)
|
|
|
|
#define __eth_stm32_buf __aligned(4)
|
2020-06-25 15:46:00 +02:00
|
|
|
#endif
|
|
|
|
|
2021-03-01 19:06:52 +01:00
|
|
|
static ETH_DMADescTypeDef dma_rx_desc_tab[ETH_RXBUFNB] __eth_stm32_desc;
|
|
|
|
static ETH_DMADescTypeDef dma_tx_desc_tab[ETH_TXBUFNB] __eth_stm32_desc;
|
2022-08-17 14:21:45 +02:00
|
|
|
static uint8_t dma_rx_buffer[ETH_RXBUFNB][ETH_STM32_RX_BUF_SIZE] __eth_stm32_buf;
|
|
|
|
static uint8_t dma_tx_buffer[ETH_TXBUFNB][ETH_STM32_TX_BUF_SIZE] __eth_stm32_buf;
|
2020-06-25 15:46:00 +02:00
|
|
|
|
2023-01-17 06:31:15 +01:00
|
|
|
#if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
|
|
|
|
|
2021-10-04 11:44:47 +02:00
|
|
|
static struct net_if_mcast_monitor mcast_monitor;
|
|
|
|
|
2023-01-17 06:31:15 +01:00
|
|
|
static K_MUTEX_DEFINE(multicast_addr_lock);
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV6)
|
|
|
|
static struct in6_addr multicast_ipv6_joined_addrs[NET_IF_MAX_IPV6_MADDR] = {0};
|
|
|
|
#endif /* CONFIG_NET_NATIVE_IPV6 */
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV4)
|
|
|
|
static struct in_addr multicast_ipv4_joined_addrs[NET_IF_MAX_IPV4_MADDR] = {0};
|
|
|
|
#endif /* CONFIG_NET_NATIVE_IPV4 */
|
|
|
|
|
|
|
|
#endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
|
|
|
|
|
2022-05-18 16:10:08 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
|
|
|
|
/* RX buffers are handed to the DMA, which transfers whole 32-bit words */
BUILD_ASSERT(ETH_STM32_RX_BUF_SIZE % 4 == 0, "Rx buffer size must be a multiple of 4");

/* Bookkeeping for one RX DMA buffer slot; the buffers making up a
 * multi-buffer frame are chained through @next (see
 * HAL_ETH_RxLinkCallback()).
 */
struct eth_stm32_rx_buffer_header {
	struct eth_stm32_rx_buffer_header *next; /* next buffer of the same frame */
	uint16_t size; /* number of bytes the DMA wrote into this buffer */
	bool used; /* slot is currently owned by the DMA / network stack */
};

/* Bookkeeping for one TX DMA buffer slot. */
struct eth_stm32_tx_buffer_header {
	ETH_BufferTypeDef tx_buff; /* HAL buffer descriptor (data/len/next) */
	bool used; /* slot is claimed by an in-flight packet */
};

/* Per-packet TX context, passed to the HAL as user data in
 * tx_config.pData and handed back in the TX callbacks.
 */
struct eth_stm32_tx_context {
	struct net_pkt *pkt; /* packet being transmitted */
	uint16_t first_tx_buffer_index; /* head of the packet's TX buffer chain */
};

static struct eth_stm32_rx_buffer_header dma_rx_buffer_header[ETH_RXBUFNB];
static struct eth_stm32_tx_buffer_header dma_tx_buffer_header[ETH_TXBUFNB];
|
|
|
|
|
2022-05-19 11:40:57 +02:00
|
|
|
/* HAL RX-allocate callback: hand out a free RX DMA buffer, or NULL
 * when every slot is in use.
 */
void HAL_ETH_RxAllocateCallback(uint8_t **buf)
{
	struct eth_stm32_rx_buffer_header *hdr = &dma_rx_buffer_header[0];

	*buf = NULL;

	for (size_t slot = 0U; slot < ETH_RXBUFNB; slot++, hdr++) {
		if (hdr->used) {
			continue;
		}
		/* Claim this slot and reset its bookkeeping. */
		hdr->next = NULL;
		hdr->size = 0;
		hdr->used = true;
		*buf = dma_rx_buffer[slot];
		break;
	}
}
|
|
|
|
|
|
|
|
/* Pointer to an array of ETH_STM32_RX_BUF_SIZE uint8_t's */
|
|
|
|
typedef uint8_t (*RxBufferPtr)[ETH_STM32_RX_BUF_SIZE];
|
|
|
|
|
|
|
|
/* called by HAL_ETH_ReadData() */
|
|
|
|
/* Called by HAL_ETH_ReadData(): append a DMA-filled RX buffer to the
 * linked list of buffers making up the frame being reassembled.
 *
 * @param pStart in/out head of the buffer list (NULL on first call)
 * @param pEnd   in/out tail of the buffer list
 * @param buff   start address of the DMA buffer that was filled
 * @param Length number of bytes the DMA wrote into @p buff
 */
void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd, uint8_t *buff, uint16_t Length)
{
	/* buff points to the begin on one of the rx buffers,
	 * so we can compute the index of the given buffer
	 */
	size_t index = (RxBufferPtr)buff - &dma_rx_buffer[0];
	struct eth_stm32_rx_buffer_header *header;

	/* Validate the index before it is used to address the header array
	 * (previously the header pointer was formed before this check).
	 */
	__ASSERT_NO_MSG(index < ETH_RXBUFNB);

	header = &dma_rx_buffer_header[index];
	header->size = Length;

	if (!*pStart) {
		/* first packet, set head pointer of linked list */
		*pStart = header;
		*pEnd = header;
	} else {
		__ASSERT_NO_MSG(*pEnd != NULL);
		/* not the first packet, add to list and adjust tail pointer */
		((struct eth_stm32_rx_buffer_header *)*pEnd)->next = header;
		*pEnd = header;
	}
}
|
2022-05-18 16:41:43 +02:00
|
|
|
|
|
|
|
/* Called by HAL_ETH_ReleaseTxPacket */
|
|
|
|
/* Called by HAL_ETH_ReleaseTxPacket(): return every TX buffer slot
 * belonging to the transmitted packet to the free pool.
 */
void HAL_ETH_TxFreeCallback(uint32_t *buff)
{
	struct eth_stm32_tx_context *tx_ctx;
	struct eth_stm32_tx_buffer_header *hdr;

	__ASSERT_NO_MSG(buff != NULL);

	/* buff is the user context stored in tx_config.pData */
	tx_ctx = (struct eth_stm32_tx_context *)buff;
	hdr = &dma_tx_buffer_header[tx_ctx->first_tx_buffer_index];

	/* Walk the packet's buffer chain, releasing each slot. */
	do {
		hdr->used = false;
		hdr = (hdr->tx_buff.next != NULL)
			? CONTAINER_OF(hdr->tx_buff.next,
				       struct eth_stm32_tx_buffer_header, tx_buff)
			: NULL;
	} while (hdr != NULL);
}
|
2022-07-30 17:18:57 +02:00
|
|
|
|
|
|
|
/* allocate a tx buffer and mark it as used */
|
|
|
|
static inline uint16_t allocate_tx_buffer(void)
|
|
|
|
{
|
|
|
|
for (;;) {
|
|
|
|
for (uint16_t index = 0; index < ETH_TXBUFNB; index++) {
|
|
|
|
if (!dma_tx_buffer_header[index].used) {
|
|
|
|
dma_tx_buffer_header[index].used = true;
|
|
|
|
return index;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
k_yield();
|
|
|
|
}
|
|
|
|
}
|
2022-05-18 16:10:08 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
|
|
|
|
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
|
2021-03-01 19:06:52 +01:00
|
|
|
static ETH_TxPacketConfig tx_config;
|
2020-06-25 15:50:42 +02:00
|
|
|
#endif
|
|
|
|
|
2020-06-25 15:46:00 +02:00
|
|
|
/* Read a PHY register, papering over the signature difference between
 * the two HAL flavours: the H7 / API-V2 HAL takes the PHY address
 * explicitly, while the legacy HAL uses the address configured in the
 * ETH handle and ignores PHYAddr here.
 *
 * @param heth    ETH HAL handle
 * @param PHYAddr PHY address on the MDIO bus (unused on legacy HAL)
 * @param PHYReg  register number to read
 * @param RegVal  out: register value
 * @return HAL status of the underlying read
 */
static HAL_StatusTypeDef read_eth_phy_register(ETH_HandleTypeDef *heth,
					       uint32_t PHYAddr,
					       uint32_t PHYReg,
					       uint32_t *RegVal)
{
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
	return HAL_ETH_ReadPHYRegister(heth, PHYAddr, PHYReg, RegVal);
#else
	ARG_UNUSED(PHYAddr);
	return HAL_ETH_ReadPHYRegister(heth, PHYReg, RegVal);
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_ETH_STM32_HAL_API_V2 */
}
|
|
|
|
|
2023-01-17 06:31:15 +01:00
|
|
|
/* Configure the MAC receive frame filter.
 *
 * With CONFIG_ETH_STM32_MULTICAST_FILTER, multicast frames are accepted
 * only via the hash-table filter; otherwise all multicast frames pass.
 * The H7 series uses the HAL filter-config API; older series program
 * the MACFFR register directly.
 */
static inline void setup_mac_filter(ETH_HandleTypeDef *heth)
{
	__ASSERT_NO_MSG(heth != NULL);

#if defined(CONFIG_SOC_SERIES_STM32H7X)
	ETH_MACFilterConfigTypeDef MACFilterConf;

	HAL_ETH_GetMACFilterConfig(heth, &MACFilterConf);
#if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
	/* Accept only multicast frames matching the hash filter */
	MACFilterConf.HashMulticast = ENABLE;
	MACFilterConf.PassAllMulticast = DISABLE;
#else
	/* No multicast filtering: pass every multicast frame */
	MACFilterConf.HashMulticast = DISABLE;
	MACFilterConf.PassAllMulticast = ENABLE;
#endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
	MACFilterConf.HachOrPerfectFilter = DISABLE;

	HAL_ETH_SetMACFilterConfig(heth, &MACFilterConf);

	/* Give the MAC time to latch the new filter configuration */
	k_sleep(K_MSEC(1));
#else
	uint32_t tmp = heth->Instance->MACFFR;

	/* disable multicast perfect filtering */
	tmp &= ~(ETH_MULTICASTFRAMESFILTER_PERFECTHASHTABLE |
#if !defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
		ETH_MULTICASTFRAMESFILTER_HASHTABLE |
#endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
		ETH_MULTICASTFRAMESFILTER_PERFECT);

#if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
	/* enable multicast hash receive filter */
	tmp |= ETH_MULTICASTFRAMESFILTER_HASHTABLE;
#endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */

	heth->Instance->MACFFR = tmp;

	/* Wait until the write operation will be taken into account:
	 * at least four TX_CLK/RX_CLK clock cycles
	 */
	tmp = heth->Instance->MACFFR;
	k_sleep(K_MSEC(1));
	heth->Instance->MACFFR = tmp;
#endif /* CONFIG_SOC_SERIES_STM32H7X) */
}
|
|
|
|
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
|
|
|
/* Check whether an outgoing packet is a PTP frame (ethertype
 * NET_ETH_PTYPE_PTP), looking behind the VLAN tag when VLAN is enabled
 * on the interface.
 *
 * Side effect: PTP frames get their priority raised to NET_PRIORITY_CA.
 *
 * @return true if the packet is a PTP frame
 */
static bool eth_is_ptp_pkt(struct net_if *iface, struct net_pkt *pkt)
{
#if defined(CONFIG_NET_VLAN)
	struct net_eth_vlan_hdr *hdr_vlan;
	struct ethernet_context *eth_ctx;

	eth_ctx = net_if_l2_data(iface);
	if (net_eth_is_vlan_enabled(eth_ctx, iface)) {
		hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);

		if (ntohs(hdr_vlan->type) != NET_ETH_PTYPE_PTP) {
			return false;
		}
	} else
	/* NOTE: the dangling `else` pairs with the block below so the
	 * non-VLAN check also runs when VLAN support is compiled out.
	 */
#endif
	{
		if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) {
			return false;
		}
	}

	net_pkt_set_priority(pkt, NET_PRIORITY_CA);

	return true;
}
|
2022-05-18 16:41:43 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
/* HAL TX-timestamp callback: copy the hardware timestamp captured for
 * the transmitted frame into the packet and notify the network stack.
 */
void HAL_ETH_TxPtpCallback(uint32_t *buff, ETH_TimeStampTypeDef *timestamp)
{
	struct eth_stm32_tx_context *tx_ctx = (struct eth_stm32_tx_context *)buff;
	struct net_pkt *tx_pkt = tx_ctx->pkt;

	tx_pkt->timestamp.second = timestamp->TimeStampHigh;
	tx_pkt->timestamp.nanosecond = timestamp->TimeStampLow;

	net_if_add_tx_timestamp(tx_pkt);
}
|
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2021-09-13 09:24:53 +02:00
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int eth_tx(const struct device *dev, struct net_pkt *pkt)
|
2017-06-23 13:03:51 +02:00
|
|
|
{
|
2022-01-18 15:48:18 +01:00
|
|
|
struct eth_stm32_hal_dev_data *dev_data = dev->data;
|
2017-06-23 13:03:51 +02:00
|
|
|
ETH_HandleTypeDef *heth;
|
|
|
|
int res;
|
2020-06-25 15:46:00 +02:00
|
|
|
size_t total_len;
|
2022-07-30 17:18:57 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
size_t remaining_read;
|
|
|
|
struct eth_stm32_tx_context ctx = {.pkt = pkt, .first_tx_buffer_index = 0};
|
|
|
|
struct eth_stm32_tx_buffer_header *buf_header = NULL;
|
|
|
|
#else
|
|
|
|
uint8_t *dma_buffer;
|
2017-06-23 13:03:51 +02:00
|
|
|
__IO ETH_DMADescTypeDef *dma_tx_desc;
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2020-06-25 15:46:00 +02:00
|
|
|
HAL_StatusTypeDef hal_ret = HAL_OK;
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
|
|
|
bool timestamped_frame;
|
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
|
|
|
__ASSERT_NO_MSG(pkt != NULL);
|
|
|
|
__ASSERT_NO_MSG(pkt->frags != NULL);
|
|
|
|
__ASSERT_NO_MSG(dev != NULL);
|
|
|
|
__ASSERT_NO_MSG(dev_data != NULL);
|
|
|
|
|
|
|
|
heth = &dev_data->heth;
|
|
|
|
|
2018-06-27 15:06:18 +02:00
|
|
|
total_len = net_pkt_get_len(pkt);
|
2022-08-13 18:49:55 +02:00
|
|
|
if (total_len > (ETH_STM32_TX_BUF_SIZE * ETH_TXBUFNB)) {
|
2020-07-27 13:44:47 +02:00
|
|
|
LOG_ERR("PKT too big");
|
2022-07-30 17:13:41 +02:00
|
|
|
return -EIO;
|
2017-06-23 13:03:51 +02:00
|
|
|
}
|
|
|
|
|
2022-07-30 17:13:41 +02:00
|
|
|
k_mutex_lock(&dev_data->tx_mutex, K_FOREVER);
|
|
|
|
|
2022-07-30 17:18:57 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
ctx.first_tx_buffer_index = allocate_tx_buffer();
|
|
|
|
buf_header = &dma_tx_buffer_header[ctx.first_tx_buffer_index];
|
|
|
|
#else /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2020-06-25 15:50:42 +02:00
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
2021-09-13 09:24:53 +02:00
|
|
|
uint32_t cur_tx_desc_idx;
|
|
|
|
|
|
|
|
cur_tx_desc_idx = heth->TxDescList.CurTxDesc;
|
|
|
|
dma_tx_desc = (ETH_DMADescTypeDef *)heth->TxDescList.TxDesc[cur_tx_desc_idx];
|
|
|
|
#else
|
|
|
|
dma_tx_desc = heth->TxDesc;
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
2020-06-25 15:50:42 +02:00
|
|
|
|
2020-06-25 15:46:00 +02:00
|
|
|
while (IS_ETH_DMATXDESC_OWN(dma_tx_desc) != (uint32_t)RESET) {
|
2017-06-23 13:03:51 +02:00
|
|
|
k_yield();
|
|
|
|
}
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
|
|
|
timestamped_frame = eth_is_ptp_pkt(net_pkt_iface(pkt), pkt);
|
|
|
|
if (timestamped_frame) {
|
|
|
|
/* Enable transmit timestamp */
|
2022-07-30 17:18:57 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
HAL_ETH_PTP_InsertTxTimestamp(heth);
|
|
|
|
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
|
2021-09-13 09:24:53 +02:00
|
|
|
dma_tx_desc->DESC2 |= ETH_DMATXNDESCRF_TTSE;
|
|
|
|
#else
|
|
|
|
dma_tx_desc->Status |= ETH_DMATXDESC_TTSE;
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2021-09-13 09:24:53 +02:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
|
|
|
|
2022-07-30 17:18:57 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
remaining_read = total_len;
|
|
|
|
/* fill and allocate buffer until remaining data fits in one buffer */
|
|
|
|
while (remaining_read > ETH_STM32_TX_BUF_SIZE) {
|
|
|
|
if (net_pkt_read(pkt, buf_header->tx_buff.buffer, ETH_STM32_TX_BUF_SIZE)) {
|
|
|
|
res = -ENOBUFS;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
const uint16_t next_buffer_id = allocate_tx_buffer();
|
|
|
|
|
|
|
|
buf_header->tx_buff.len = ETH_STM32_TX_BUF_SIZE;
|
|
|
|
/* append new buffer to the linked list */
|
|
|
|
buf_header->tx_buff.next = &dma_tx_buffer_header[next_buffer_id].tx_buff;
|
|
|
|
/* and adjust tail pointer */
|
|
|
|
buf_header = &dma_tx_buffer_header[next_buffer_id];
|
|
|
|
remaining_read -= ETH_STM32_TX_BUF_SIZE;
|
|
|
|
}
|
|
|
|
if (net_pkt_read(pkt, buf_header->tx_buff.buffer, remaining_read)) {
|
|
|
|
res = -ENOBUFS;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
buf_header->tx_buff.len = remaining_read;
|
|
|
|
buf_header->tx_buff.next = NULL;
|
|
|
|
|
|
|
|
#else /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2020-06-25 15:50:42 +02:00
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
|
|
|
dma_buffer = dma_tx_buffer[cur_tx_desc_idx];
|
|
|
|
#else
|
2020-05-27 18:26:57 +02:00
|
|
|
dma_buffer = (uint8_t *)(dma_tx_desc->Buffer1Addr);
|
2020-06-25 15:50:42 +02:00
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2019-02-20 09:40:48 +01:00
|
|
|
if (net_pkt_read(pkt, dma_buffer, total_len)) {
|
2020-09-16 12:33:37 +02:00
|
|
|
res = -ENOBUFS;
|
2018-11-27 20:16:42 +01:00
|
|
|
goto error;
|
2017-06-23 13:03:51 +02:00
|
|
|
}
|
|
|
|
|
2020-06-25 15:50:42 +02:00
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
2021-09-13 09:24:53 +02:00
|
|
|
ETH_BufferTypeDef tx_buffer_def;
|
2020-06-25 15:50:42 +02:00
|
|
|
|
2021-09-13 09:24:53 +02:00
|
|
|
tx_buffer_def.buffer = dma_buffer;
|
|
|
|
tx_buffer_def.len = total_len;
|
|
|
|
tx_buffer_def.next = NULL;
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
|
|
|
|
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
|
2020-06-25 15:50:42 +02:00
|
|
|
|
|
|
|
tx_config.Length = total_len;
|
2022-07-30 17:18:57 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
tx_config.pData = &ctx;
|
|
|
|
tx_config.TxBuffer = &dma_tx_buffer_header[ctx.first_tx_buffer_index].tx_buff;
|
|
|
|
#else
|
2021-09-13 09:24:53 +02:00
|
|
|
tx_config.TxBuffer = &tx_buffer_def;
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2020-06-25 15:50:42 +02:00
|
|
|
|
2020-07-27 13:44:47 +02:00
|
|
|
/* Reset TX complete interrupt semaphore before TX request*/
|
|
|
|
k_sem_reset(&dev_data->tx_int_sem);
|
|
|
|
|
|
|
|
/* tx_buffer is allocated on function stack, we need */
|
|
|
|
/* to wait for the transfer to complete */
|
|
|
|
/* So it is not freed before the interrupt happens */
|
|
|
|
hal_ret = HAL_ETH_Transmit_IT(heth, &tx_config);
|
|
|
|
|
|
|
|
if (hal_ret != HAL_OK) {
|
|
|
|
LOG_ERR("HAL_ETH_Transmit: failed!");
|
|
|
|
res = -EIO;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wait for end of TX buffer transmission */
|
|
|
|
/* If the semaphore timeout breaks, it means */
|
|
|
|
/* an error occurred or IT was not fired */
|
|
|
|
if (k_sem_take(&dev_data->tx_int_sem,
|
|
|
|
K_MSEC(ETH_DMA_TX_TIMEOUT_MS)) != 0) {
|
|
|
|
|
|
|
|
LOG_ERR("HAL_ETH_TransmitIT tx_int_sem take timeout");
|
|
|
|
res = -EIO;
|
|
|
|
|
2022-07-30 17:18:57 +02:00
|
|
|
#ifndef CONFIG_ETH_STM32_HAL_API_V2
|
2020-07-27 13:44:47 +02:00
|
|
|
/* Content of the packet could be the reason for timeout */
|
|
|
|
LOG_HEXDUMP_ERR(dma_buffer, total_len, "eth packet timeout");
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif
|
2020-07-27 13:44:47 +02:00
|
|
|
|
|
|
|
/* Check for errors */
|
|
|
|
/* Ethernet device was put in error state */
|
|
|
|
/* Error state is unrecoverable ? */
|
|
|
|
if (HAL_ETH_GetState(heth) == HAL_ETH_STATE_ERROR) {
|
|
|
|
LOG_ERR("%s: ETH in error state: errorcode:%x",
|
|
|
|
__func__,
|
|
|
|
HAL_ETH_GetError(heth));
|
|
|
|
/* TODO recover from error state by restarting eth */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for DMA errors */
|
|
|
|
if (HAL_ETH_GetDMAError(heth)) {
|
|
|
|
LOG_ERR("%s: ETH DMA error: dmaerror:%x",
|
|
|
|
__func__,
|
|
|
|
HAL_ETH_GetDMAError(heth));
|
|
|
|
/* DMA fatal bus errors are putting in error state*/
|
|
|
|
/* TODO recover from this */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for MAC errors */
|
|
|
|
if (HAL_ETH_GetDMAError(heth)) {
|
|
|
|
LOG_ERR("%s: ETH DMA error: macerror:%x",
|
|
|
|
__func__,
|
|
|
|
HAL_ETH_GetDMAError(heth));
|
|
|
|
/* MAC errors are putting in error state*/
|
|
|
|
/* TODO recover from this */
|
|
|
|
}
|
|
|
|
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2020-06-25 15:50:42 +02:00
|
|
|
#else
|
2020-06-25 15:46:00 +02:00
|
|
|
hal_ret = HAL_ETH_TransmitFrame(heth, total_len);
|
2020-06-25 15:50:42 +02:00
|
|
|
|
2020-06-25 15:46:00 +02:00
|
|
|
if (hal_ret != HAL_OK) {
|
2020-07-27 13:44:47 +02:00
|
|
|
LOG_ERR("HAL_ETH_Transmit: failed!");
|
2017-06-23 13:03:51 +02:00
|
|
|
res = -EIO;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* When Transmit Underflow flag is set, clear it and issue a
|
|
|
|
* Transmit Poll Demand to resume transmission.
|
|
|
|
*/
|
2020-05-27 18:26:57 +02:00
|
|
|
if ((heth->Instance->DMASR & ETH_DMASR_TUS) != (uint32_t)RESET) {
|
2017-06-23 13:03:51 +02:00
|
|
|
/* Clear TUS ETHERNET DMA flag */
|
|
|
|
heth->Instance->DMASR = ETH_DMASR_TUS;
|
|
|
|
/* Resume DMA transmission*/
|
|
|
|
heth->Instance->DMATPDR = 0;
|
|
|
|
res = -EIO;
|
|
|
|
goto error;
|
|
|
|
}
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_ETH_STM32_HAL_API_V2 */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2022-07-30 17:18:57 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL) && !defined(CONFIG_ETH_STM32_HAL_API_V2)
|
2021-09-13 09:24:53 +02:00
|
|
|
if (timestamped_frame) {
|
|
|
|
/* Retrieve transmission timestamp from last DMA TX descriptor */
|
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
|
|
|
ETH_TxDescListTypeDef * dma_tx_desc_list;
|
|
|
|
|
|
|
|
__IO ETH_DMADescTypeDef *last_dma_tx_desc;
|
|
|
|
|
|
|
|
dma_tx_desc_list = &heth->TxDescList;
|
|
|
|
for (uint32_t i = 0; i < ETH_TX_DESC_CNT; i++) {
|
|
|
|
const uint32_t last_desc_idx = (cur_tx_desc_idx + i) % ETH_TX_DESC_CNT;
|
|
|
|
|
|
|
|
last_dma_tx_desc =
|
|
|
|
(ETH_DMADescTypeDef *)dma_tx_desc_list->TxDesc[last_desc_idx];
|
|
|
|
if (last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_LD) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
while (IS_ETH_DMATXDESC_OWN(last_dma_tx_desc) != (uint32_t)RESET) {
|
|
|
|
/* Wait for transmission */
|
|
|
|
k_yield();
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_LD) &&
|
|
|
|
(last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_TTSS)) {
|
|
|
|
pkt->timestamp.second = last_dma_tx_desc->DESC1;
|
|
|
|
pkt->timestamp.nanosecond = last_dma_tx_desc->DESC0;
|
|
|
|
} else {
|
|
|
|
/* Invalid value */
|
|
|
|
pkt->timestamp.second = UINT64_MAX;
|
|
|
|
pkt->timestamp.nanosecond = UINT32_MAX;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
__IO ETH_DMADescTypeDef *last_dma_tx_desc = dma_tx_desc;
|
|
|
|
|
|
|
|
while (!(last_dma_tx_desc->Status & ETH_DMATXDESC_LS) &&
|
|
|
|
last_dma_tx_desc->Buffer2NextDescAddr) {
|
|
|
|
last_dma_tx_desc =
|
|
|
|
(ETH_DMADescTypeDef *)last_dma_tx_desc->Buffer2NextDescAddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (IS_ETH_DMATXDESC_OWN(last_dma_tx_desc) != (uint32_t)RESET) {
|
|
|
|
/* Wait for transmission */
|
|
|
|
k_yield();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (last_dma_tx_desc->Status & ETH_DMATXDESC_LS &&
|
|
|
|
last_dma_tx_desc->Status & ETH_DMATXDESC_TTSS) {
|
|
|
|
pkt->timestamp.second = last_dma_tx_desc->TimeStampHigh;
|
|
|
|
pkt->timestamp.nanosecond = last_dma_tx_desc->TimeStampLow;
|
|
|
|
} else {
|
|
|
|
/* Invalid value */
|
|
|
|
pkt->timestamp.second = UINT64_MAX;
|
|
|
|
pkt->timestamp.nanosecond = UINT32_MAX;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
|
|
|
|
|
|
|
net_if_add_tx_timestamp(pkt);
|
|
|
|
}
|
2022-07-30 17:18:57 +02:00
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL && !CONFIG_ETH_STM32_HAL_API_V2 */
|
2021-09-13 09:24:53 +02:00
|
|
|
|
2017-06-23 13:03:51 +02:00
|
|
|
res = 0;
|
|
|
|
error:
|
2022-07-30 17:18:57 +02:00
|
|
|
|
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
/* free package tx buffer */
|
|
|
|
if (res != 0) {
|
|
|
|
HAL_ETH_TxFreeCallback((uint32_t *)&ctx);
|
|
|
|
} else if (HAL_ETH_ReleaseTxPacket(heth) != HAL_OK) {
|
|
|
|
LOG_ERR("HAL_ETH_ReleaseTxPacket failed");
|
|
|
|
res = -EIO;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-06-23 13:03:51 +02:00
|
|
|
k_mutex_unlock(&dev_data->tx_mutex);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2020-02-11 18:50:11 +01:00
|
|
|
static struct net_if *get_iface(struct eth_stm32_hal_dev_data *ctx,
|
2020-05-27 18:26:57 +02:00
|
|
|
uint16_t vlan_tag)
|
2020-02-11 18:50:11 +01:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_NET_VLAN)
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);
|
|
|
|
if (!iface) {
|
|
|
|
return ctx->iface;
|
|
|
|
}
|
|
|
|
|
|
|
|
return iface;
|
|
|
|
#else
|
|
|
|
ARG_UNUSED(vlan_tag);
|
|
|
|
|
|
|
|
return ctx->iface;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static struct net_pkt *eth_rx(const struct device *dev, uint16_t *vlan_tag)
|
2017-06-23 13:03:51 +02:00
|
|
|
{
|
|
|
|
struct eth_stm32_hal_dev_data *dev_data;
|
|
|
|
ETH_HandleTypeDef *heth;
|
2022-07-30 17:19:27 +02:00
|
|
|
struct net_pkt *pkt;
|
|
|
|
size_t total_len = 0;
|
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
void *appbuf = NULL;
|
|
|
|
struct eth_stm32_rx_buffer_header *rx_header;
|
|
|
|
#else
|
2020-06-25 15:50:42 +02:00
|
|
|
#if !defined(CONFIG_SOC_SERIES_STM32H7X)
|
2017-06-23 13:03:51 +02:00
|
|
|
__IO ETH_DMADescTypeDef *dma_rx_desc;
|
2020-06-25 15:50:42 +02:00
|
|
|
#endif /* !CONFIG_SOC_SERIES_STM32H7X */
|
2020-05-27 18:26:57 +02:00
|
|
|
uint8_t *dma_buffer;
|
2020-06-25 15:46:00 +02:00
|
|
|
HAL_StatusTypeDef hal_ret = HAL_OK;
|
2022-07-30 17:19:27 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
|
|
|
struct net_ptp_time timestamp;
|
2022-07-30 17:19:27 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
ETH_TimeStampTypeDef ts_registers;
|
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2021-09-13 09:24:53 +02:00
|
|
|
/* Default to invalid value. */
|
|
|
|
timestamp.second = UINT64_MAX;
|
|
|
|
timestamp.nanosecond = UINT32_MAX;
|
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
|
|
|
__ASSERT_NO_MSG(dev != NULL);
|
|
|
|
|
2022-01-18 15:48:18 +01:00
|
|
|
dev_data = dev->data;
|
2017-06-23 13:03:51 +02:00
|
|
|
|
|
|
|
__ASSERT_NO_MSG(dev_data != NULL);
|
|
|
|
|
|
|
|
heth = &dev_data->heth;
|
|
|
|
|
2022-07-30 17:19:27 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
if (HAL_ETH_ReadData(heth, &appbuf) != HAL_OK) {
|
|
|
|
/* no frame available */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* computing total length */
|
|
|
|
for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
|
|
|
|
rx_header; rx_header = rx_header->next) {
|
|
|
|
total_len += rx_header->size;
|
|
|
|
}
|
|
|
|
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
|
2020-06-25 15:50:42 +02:00
|
|
|
if (HAL_ETH_IsRxDataAvailable(heth) != true) {
|
|
|
|
/* no frame available */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ETH_BufferTypeDef rx_buffer_def;
|
|
|
|
uint32_t frame_length = 0;
|
|
|
|
|
|
|
|
hal_ret = HAL_ETH_GetRxDataBuffer(heth, &rx_buffer_def);
|
|
|
|
if (hal_ret != HAL_OK) {
|
|
|
|
LOG_ERR("HAL_ETH_GetRxDataBuffer: failed with state: %d",
|
|
|
|
hal_ret);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
hal_ret = HAL_ETH_GetRxDataLength(heth, &frame_length);
|
|
|
|
if (hal_ret != HAL_OK) {
|
|
|
|
LOG_ERR("HAL_ETH_GetRxDataLength: failed with state: %d",
|
|
|
|
hal_ret);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
total_len = frame_length;
|
|
|
|
dma_buffer = rx_buffer_def.buffer;
|
|
|
|
#else
|
2020-06-25 15:46:00 +02:00
|
|
|
hal_ret = HAL_ETH_GetReceivedFrame_IT(heth);
|
|
|
|
if (hal_ret != HAL_OK) {
|
2017-06-23 13:03:51 +02:00
|
|
|
/* no frame available */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
total_len = heth->RxFrameInfos.length;
|
2020-05-27 18:26:57 +02:00
|
|
|
dma_buffer = (uint8_t *)heth->RxFrameInfos.buffer;
|
2020-06-25 15:50:42 +02:00
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
2022-07-30 17:19:27 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
|
|
|
|
if (HAL_ETH_PTP_GetRxTimestamp(heth, &ts_registers) == HAL_OK) {
|
|
|
|
timestamp.second = ts_registers.TimeStampHigh;
|
|
|
|
timestamp.nanosecond = ts_registers.TimeStampLow;
|
|
|
|
}
|
|
|
|
|
|
|
|
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
|
2021-09-13 09:24:53 +02:00
|
|
|
ETH_RxDescListTypeDef * dma_rx_desc_list;
|
|
|
|
|
|
|
|
dma_rx_desc_list = &heth->RxDescList;
|
|
|
|
if (dma_rx_desc_list->AppDescNbr) {
|
|
|
|
__IO ETH_DMADescTypeDef *last_dma_rx_desc;
|
|
|
|
|
|
|
|
const uint32_t last_desc_idx =
|
|
|
|
(dma_rx_desc_list->FirstAppDesc + dma_rx_desc_list->AppDescNbr - 1U)
|
|
|
|
% ETH_RX_DESC_CNT;
|
|
|
|
|
|
|
|
last_dma_rx_desc =
|
|
|
|
(ETH_DMADescTypeDef *)dma_rx_desc_list->RxDesc[last_desc_idx];
|
|
|
|
|
|
|
|
if (dma_rx_desc_list->AppContextDesc &&
|
|
|
|
last_dma_rx_desc->DESC1 & ETH_DMARXNDESCWBF_TSA) {
|
|
|
|
/* Retrieve timestamp from context DMA descriptor */
|
|
|
|
__IO ETH_DMADescTypeDef *context_dma_rx_desc;
|
|
|
|
|
|
|
|
const uint32_t context_desc_idx = (last_desc_idx + 1U) % ETH_RX_DESC_CNT;
|
|
|
|
|
|
|
|
context_dma_rx_desc =
|
|
|
|
(ETH_DMADescTypeDef *)dma_rx_desc_list->RxDesc[context_desc_idx];
|
|
|
|
if (context_dma_rx_desc->DESC1 != UINT32_MAX ||
|
|
|
|
context_dma_rx_desc->DESC0 != UINT32_MAX) {
|
|
|
|
timestamp.second = context_dma_rx_desc->DESC1;
|
|
|
|
timestamp.nanosecond = context_dma_rx_desc->DESC0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
__IO ETH_DMADescTypeDef *last_dma_rx_desc;
|
|
|
|
|
|
|
|
last_dma_rx_desc = heth->RxFrameInfos.LSRxDesc;
|
|
|
|
if (last_dma_rx_desc->TimeStampHigh != UINT32_MAX ||
|
|
|
|
last_dma_rx_desc->TimeStampLow != UINT32_MAX) {
|
|
|
|
timestamp.second = last_dma_rx_desc->TimeStampHigh;
|
|
|
|
timestamp.nanosecond = last_dma_rx_desc->TimeStampLow;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
|
|
|
|
2020-02-11 18:50:11 +01:00
|
|
|
pkt = net_pkt_rx_alloc_with_buffer(get_iface(dev_data, *vlan_tag),
|
2021-08-05 08:02:01 +02:00
|
|
|
total_len, AF_UNSPEC, 0, K_MSEC(100));
|
2017-06-23 13:03:51 +02:00
|
|
|
if (!pkt) {
|
2018-07-09 11:51:28 +02:00
|
|
|
LOG_ERR("Failed to obtain RX buffer");
|
2017-06-23 13:03:51 +02:00
|
|
|
goto release_desc;
|
|
|
|
}
|
|
|
|
|
2022-07-30 17:19:27 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
|
|
|
|
rx_header; rx_header = rx_header->next) {
|
|
|
|
const size_t index = rx_header - &dma_rx_buffer_header[0];
|
|
|
|
|
|
|
|
__ASSERT_NO_MSG(index < ETH_RXBUFNB);
|
|
|
|
if (net_pkt_write(pkt, dma_rx_buffer[index], rx_header->size)) {
|
|
|
|
LOG_ERR("Failed to append RX buffer to context buffer");
|
|
|
|
net_pkt_unref(pkt);
|
|
|
|
pkt = NULL;
|
|
|
|
goto release_desc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
2019-02-20 10:01:57 +01:00
|
|
|
if (net_pkt_write(pkt, dma_buffer, total_len)) {
|
2018-07-09 11:51:28 +02:00
|
|
|
LOG_ERR("Failed to append RX buffer to context buffer");
|
2017-06-23 13:03:51 +02:00
|
|
|
net_pkt_unref(pkt);
|
|
|
|
pkt = NULL;
|
|
|
|
goto release_desc;
|
|
|
|
}
|
2022-07-30 17:19:27 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
|
|
|
release_desc:
|
2022-07-30 17:19:27 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
|
|
|
|
rx_header; rx_header = rx_header->next) {
|
|
|
|
rx_header->used = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
|
2020-06-25 15:50:42 +02:00
|
|
|
hal_ret = HAL_ETH_BuildRxDescriptors(heth);
|
|
|
|
if (hal_ret != HAL_OK) {
|
|
|
|
LOG_ERR("HAL_ETH_BuildRxDescriptors: failed: %d", hal_ret);
|
|
|
|
}
|
|
|
|
#else
|
2017-06-23 13:03:51 +02:00
|
|
|
/* Release descriptors to DMA */
|
|
|
|
/* Point to first descriptor */
|
|
|
|
dma_rx_desc = heth->RxFrameInfos.FSRxDesc;
|
|
|
|
/* Set Own bit in Rx descriptors: gives the buffers back to DMA */
|
2020-06-25 15:46:00 +02:00
|
|
|
for (int i = 0; i < heth->RxFrameInfos.SegCount; i++) {
|
2017-06-23 13:03:51 +02:00
|
|
|
dma_rx_desc->Status |= ETH_DMARXDESC_OWN;
|
|
|
|
dma_rx_desc = (ETH_DMADescTypeDef *)
|
|
|
|
(dma_rx_desc->Buffer2NextDescAddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clear Segment_Count */
|
|
|
|
heth->RxFrameInfos.SegCount = 0;
|
|
|
|
|
|
|
|
/* When Rx Buffer unavailable flag is set: clear it
|
|
|
|
* and resume reception.
|
|
|
|
*/
|
2020-05-27 18:26:57 +02:00
|
|
|
if ((heth->Instance->DMASR & ETH_DMASR_RBUS) != (uint32_t)RESET) {
|
2017-06-23 13:03:51 +02:00
|
|
|
/* Clear RBUS ETHERNET DMA flag */
|
|
|
|
heth->Instance->DMASR = ETH_DMASR_RBUS;
|
|
|
|
/* Resume DMA reception */
|
|
|
|
heth->Instance->DMARPDR = 0;
|
|
|
|
}
|
2022-07-30 17:19:27 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2022-03-18 04:53:17 +01:00
|
|
|
if (!pkt) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-02-11 18:50:11 +01:00
|
|
|
#if defined(CONFIG_NET_VLAN)
|
|
|
|
struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);
|
|
|
|
|
|
|
|
if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
|
|
|
|
struct net_eth_vlan_hdr *hdr_vlan =
|
|
|
|
(struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
|
|
|
|
|
|
|
|
net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci));
|
|
|
|
*vlan_tag = net_pkt_vlan_tag(pkt);
|
|
|
|
|
|
|
|
#if CONFIG_NET_TC_RX_COUNT > 1
|
|
|
|
enum net_priority prio;
|
|
|
|
|
|
|
|
prio = net_vlan2priority(net_pkt_vlan_priority(pkt));
|
|
|
|
net_pkt_set_priority(pkt, prio);
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
net_pkt_set_iface(pkt, dev_data->iface);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_VLAN */
|
|
|
|
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
|
|
|
if (eth_is_ptp_pkt(get_iface(dev_data, *vlan_tag), pkt)) {
|
|
|
|
pkt->timestamp.second = timestamp.second;
|
|
|
|
pkt->timestamp.nanosecond = timestamp.nanosecond;
|
|
|
|
} else {
|
|
|
|
/* Invalid value */
|
|
|
|
pkt->timestamp.second = UINT64_MAX;
|
|
|
|
pkt->timestamp.nanosecond = UINT32_MAX;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
|
|
|
|
2022-03-18 04:53:17 +01:00
|
|
|
out:
|
2018-12-05 14:52:59 +01:00
|
|
|
if (!pkt) {
|
2020-02-11 18:50:11 +01:00
|
|
|
eth_stats_update_errors_rx(get_iface(dev_data, *vlan_tag));
|
2018-12-05 14:52:59 +01:00
|
|
|
}
|
|
|
|
|
2017-06-23 13:03:51 +02:00
|
|
|
return pkt;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * RX worker thread.
 *
 * Blocks on rx_int_sem (given from HAL_ETH_RxCpltCallback) and drains all
 * pending frames via eth_rx(), handing each to the network stack. When the
 * semaphore take times out (-EAGAIN), the PHY BSR register is polled instead
 * to track carrier/link state and report it to the stack.
 *
 * arg1 is the ethernet device; unused1/unused2 are required by the
 * k_thread_create() entry-point signature and ignored.
 */
static void rx_thread(void *arg1, void *unused1, void *unused2)
{
	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
	const struct device *dev;
	struct eth_stm32_hal_dev_data *dev_data;
	struct net_pkt *pkt;
	int res;
	uint32_t status;
	HAL_StatusTypeDef hal_ret = HAL_OK;

	__ASSERT_NO_MSG(arg1 != NULL);
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	dev = (const struct device *)arg1;
	dev_data = dev->data;

	__ASSERT_NO_MSG(dev_data != NULL);

	while (1) {
		res = k_sem_take(&dev_data->rx_int_sem,
			K_MSEC(CONFIG_ETH_STM32_CARRIER_CHECK_RX_IDLE_TIMEOUT_MS));
		if (res == 0) {
			/* semaphore taken, update link status and receive packets */
			/* An RX interrupt implies the link must be up. */
			if (dev_data->link_up != true) {
				dev_data->link_up = true;
				net_eth_carrier_on(get_iface(dev_data,
							     vlan_tag));
			}
			/* Drain every completed frame before blocking again */
			while ((pkt = eth_rx(dev, &vlan_tag)) != NULL) {
				res = net_recv_data(net_pkt_iface(pkt), pkt);
				if (res < 0) {
					/* Stack refused the packet; we still
					 * own it, so count the error and drop.
					 */
					eth_stats_update_errors_rx(
							net_pkt_iface(pkt));
					LOG_ERR("Failed to enqueue frame "
						"into RX queue: %d", res);
					net_pkt_unref(pkt);
				}
			}
		} else if (res == -EAGAIN) {
			/* semaphore timeout period expired, check link status */
			hal_ret = read_eth_phy_register(&dev_data->heth,
				    PHY_ADDR, PHY_BSR, (uint32_t *) &status);
			if (hal_ret == HAL_OK) {
				if ((status & PHY_LINKED_STATUS) == PHY_LINKED_STATUS) {
					/* Only report carrier transitions */
					if (dev_data->link_up != true) {
						dev_data->link_up = true;
						net_eth_carrier_on(
							get_iface(dev_data,
								  vlan_tag));
					}
				} else {
					if (dev_data->link_up != false) {
						dev_data->link_up = false;
						net_eth_carrier_off(
							get_iface(dev_data,
								  vlan_tag));
					}
				}
			}
			/* PHY read failure: keep previous link state */
		}
	}
}
|
|
|
|
|
isr: Normalize usage of device instance through ISR
The goal of this patch is to replace the 'void *' parameter by 'struct
device *' if they use such variable or just 'const void *' on all
relevant ISRs
This will avoid not-so-nice const qualifier tweaks when device instances
will be constant.
Note that only the ISR passed to IRQ_CONNECT are of interest here.
In order to do so, the script fix_isr.py below is necessary:
from pathlib import Path
import subprocess
import pickle
import mmap
import sys
import re
import os
cocci_template = """
@r_fix_isr_0
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
(
const struct device *D = (const struct device *)P;
|
const struct device *D = P;
)
...
}
@r_fix_isr_1
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
const struct device *D;
...
(
D = (const struct device *)P;
|
D = P;
)
...
}
@r_fix_isr_2
@
type ret_type;
identifier A;
@@
-ret_type <!fn!>(void *A)
+ret_type <!fn!>(const void *A)
{
...
}
@r_fix_isr_3
@
const struct device *D;
@@
-<!fn!>((void *)D);
+<!fn!>(D);
@r_fix_isr_4
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
(
-const struct device *D = (const struct device *)P;
|
-const struct device *D = P;
)
...
}
@r_fix_isr_5
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
-const struct device *D;
...
(
-D = (const struct device *)P;
|
-D = P;
)
...
}
"""
def find_isr(fn):
db = []
data = None
start = 0
try:
with open(fn, 'r+') as f:
data = str(mmap.mmap(f.fileno(), 0).read())
except Exception as e:
return db
while True:
isr = ""
irq = data.find('IRQ_CONNECT', start)
while irq > -1:
p = 1
arg = 1
p_o = data.find('(', irq)
if p_o < 0:
irq = -1
break;
pos = p_o + 1
while p > 0:
if data[pos] == ')':
p -= 1
elif data[pos] == '(':
p += 1
elif data[pos] == ',' and p == 1:
arg += 1
if arg == 3:
isr += data[pos]
pos += 1
isr = isr.strip(',\\n\\t ')
if isr not in db and len(isr) > 0:
db.append(isr)
start = pos
break
if irq < 0:
break
return db
def patch_isr(fn, isr_list):
if len(isr_list) <= 0:
return
for isr in isr_list:
tmplt = cocci_template.replace('<!fn!>', isr)
with open('/tmp/isr_fix.cocci', 'w') as f:
f.write(tmplt)
cmd = ['spatch', '--sp-file', '/tmp/isr_fix.cocci', '--in-place', fn]
subprocess.run(cmd)
def process_files(path):
if path.is_file() and path.suffix in ['.h', '.c']:
p = str(path.parent) + '/' + path.name
isr_list = find_isr(p)
patch_isr(p, isr_list)
elif path.is_dir():
for p in path.iterdir():
process_files(p)
if len(sys.argv) < 2:
print("You need to provide a dir/file path")
sys.exit(1)
process_files(Path(sys.argv[1]))
And is run: ./fix_isr.py <zephyr root directory>
Finally, some files needed manual fixes such.
Fixes #27399
Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
2020-06-17 14:58:56 +02:00
|
|
|
static void eth_isr(const struct device *dev)
|
2017-06-23 13:03:51 +02:00
|
|
|
{
|
|
|
|
struct eth_stm32_hal_dev_data *dev_data;
|
|
|
|
ETH_HandleTypeDef *heth;
|
|
|
|
|
isr: Normalize usage of device instance through ISR
The goal of this patch is to replace the 'void *' parameter by 'struct
device *' if they use such variable or just 'const void *' on all
relevant ISRs
This will avoid not-so-nice const qualifier tweaks when device instances
will be constant.
Note that only the ISR passed to IRQ_CONNECT are of interest here.
In order to do so, the script fix_isr.py below is necessary:
from pathlib import Path
import subprocess
import pickle
import mmap
import sys
import re
import os
cocci_template = """
@r_fix_isr_0
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
(
const struct device *D = (const struct device *)P;
|
const struct device *D = P;
)
...
}
@r_fix_isr_1
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
const struct device *D;
...
(
D = (const struct device *)P;
|
D = P;
)
...
}
@r_fix_isr_2
@
type ret_type;
identifier A;
@@
-ret_type <!fn!>(void *A)
+ret_type <!fn!>(const void *A)
{
...
}
@r_fix_isr_3
@
const struct device *D;
@@
-<!fn!>((void *)D);
+<!fn!>(D);
@r_fix_isr_4
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
(
-const struct device *D = (const struct device *)P;
|
-const struct device *D = P;
)
...
}
@r_fix_isr_5
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
-const struct device *D;
...
(
-D = (const struct device *)P;
|
-D = P;
)
...
}
"""
def find_isr(fn):
db = []
data = None
start = 0
try:
with open(fn, 'r+') as f:
data = str(mmap.mmap(f.fileno(), 0).read())
except Exception as e:
return db
while True:
isr = ""
irq = data.find('IRQ_CONNECT', start)
while irq > -1:
p = 1
arg = 1
p_o = data.find('(', irq)
if p_o < 0:
irq = -1
break;
pos = p_o + 1
while p > 0:
if data[pos] == ')':
p -= 1
elif data[pos] == '(':
p += 1
elif data[pos] == ',' and p == 1:
arg += 1
if arg == 3:
isr += data[pos]
pos += 1
isr = isr.strip(',\\n\\t ')
if isr not in db and len(isr) > 0:
db.append(isr)
start = pos
break
if irq < 0:
break
return db
def patch_isr(fn, isr_list):
if len(isr_list) <= 0:
return
for isr in isr_list:
tmplt = cocci_template.replace('<!fn!>', isr)
with open('/tmp/isr_fix.cocci', 'w') as f:
f.write(tmplt)
cmd = ['spatch', '--sp-file', '/tmp/isr_fix.cocci', '--in-place', fn]
subprocess.run(cmd)
def process_files(path):
if path.is_file() and path.suffix in ['.h', '.c']:
p = str(path.parent) + '/' + path.name
isr_list = find_isr(p)
patch_isr(p, isr_list)
elif path.is_dir():
for p in path.iterdir():
process_files(p)
if len(sys.argv) < 2:
print("You need to provide a dir/file path")
sys.exit(1)
process_files(Path(sys.argv[1]))
And is run: ./fix_isr.py <zephyr root directory>
Finally, some files needed manual fixes such.
Fixes #27399
Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
2020-06-17 14:58:56 +02:00
|
|
|
__ASSERT_NO_MSG(dev != NULL);
|
2017-06-23 13:03:51 +02:00
|
|
|
|
2022-01-18 15:48:18 +01:00
|
|
|
dev_data = dev->data;
|
2017-06-23 13:03:51 +02:00
|
|
|
|
|
|
|
__ASSERT_NO_MSG(dev_data != NULL);
|
|
|
|
|
|
|
|
heth = &dev_data->heth;
|
|
|
|
|
|
|
|
__ASSERT_NO_MSG(heth != NULL);
|
|
|
|
|
|
|
|
HAL_ETH_IRQHandler(heth);
|
|
|
|
}
|
2022-05-18 16:41:43 +02:00
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
|
2020-07-27 13:44:47 +02:00
|
|
|
/*
 * HAL TX-complete hook: wake the transmitter blocked on tx_int_sem.
 */
void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth_handle)
{
	__ASSERT_NO_MSG(heth_handle != NULL);

	/* Recover the driver context embedding this HAL handle */
	struct eth_stm32_hal_dev_data *ctx =
		CONTAINER_OF(heth_handle, struct eth_stm32_hal_dev_data, heth);

	__ASSERT_NO_MSG(ctx != NULL);

	k_sem_give(&ctx->tx_int_sem);
}
|
2022-05-18 16:41:43 +02:00
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_ETH_STM32_HAL_API_V2 */
|
|
|
|
|
2022-08-13 20:05:13 +02:00
|
|
|
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
|
|
|
|
void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth)
{
	/* Do nothing */
	/* Do not log errors. If errors are reported due to high traffic,
	 * logging them would only make the traffic issues worse.
	 */
}
|
|
|
|
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
|
2020-07-27 13:44:47 +02:00
|
|
|
/* DMA and MAC errors callback only appear in H7 series */
|
|
|
|
/* DMA and MAC errors callback only appear in H7 series */
/*
 * Dump the HAL/DMA error codes and detect unrecoverable DMA faults.
 */
void HAL_ETH_DMAErrorCallback(ETH_HandleTypeDef *heth)
{
	__ASSERT_NO_MSG(heth != NULL);

	LOG_ERR("%s errorcode:%x dmaerror:%x",
		__func__,
		HAL_ETH_GetError(heth),
		HAL_ETH_GetDMAError(heth));

	/* Unrecoverable faults (ETH_DMACSR_FBE | ETH_DMACSR_TPS |
	 * ETH_DMACSR_RPS) leave the HAL handle in the ERROR state.
	 */
	if (HAL_ETH_GetState(heth) != HAL_ETH_STATE_ERROR) {
		/* Recoverable errors don't put ETH in error state:
		 * ETH_DMACSR_CDE | ETH_DMACSR_ETI | ETH_DMACSR_RWT
		 * | ETH_DMACSR_RBU | ETH_DMACSR_AIS
		 */

		/* TODO Check if we were TX transmitting and the unlock semaphore */
		/* To return the error as soon as possible else we'll just wait */
		/* for the timeout */
		return;
	}

	LOG_ERR("%s ethernet in error state", __func__);
	/* TODO restart the ETH peripheral to recover */
}
|
|
|
|
/*
 * Dump the HAL/MAC error codes and flag an unrecoverable MAC error state.
 */
void HAL_ETH_MACErrorCallback(ETH_HandleTypeDef *heth)
{
	__ASSERT_NO_MSG(heth != NULL);

	/* MAC errors dumping */
	LOG_ERR("%s errorcode:%x macerror:%x",
		__func__,
		HAL_ETH_GetError(heth),
		HAL_ETH_GetMACError(heth));

	/* State of eth handle is ERROR in case of unrecoverable error */
	if (HAL_ETH_GetState(heth) == HAL_ETH_STATE_ERROR) {
		LOG_ERR("%s ethernet in error state", __func__);
		/* TODO restart or reconfig ETH peripheral to recover */
	}
}
|
2022-08-13 20:05:13 +02:00
|
|
|
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
|
2017-06-23 13:03:51 +02:00
|
|
|
|
|
|
|
/*
 * HAL RX-complete hook: wake rx_thread so it drains the received frames.
 */
void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth_handle)
{
	__ASSERT_NO_MSG(heth_handle != NULL);

	/* Recover the driver context embedding this HAL handle */
	struct eth_stm32_hal_dev_data *ctx =
		CONTAINER_OF(heth_handle, struct eth_stm32_hal_dev_data, heth);

	__ASSERT_NO_MSG(ctx != NULL);

	k_sem_give(&ctx->rx_int_sem);
}
|
|
|
|
|
2020-05-27 18:26:57 +02:00
|
|
|
/*
 * Fill mac_addr (6 bytes) with the interface MAC address.
 *
 * Source priority: random MAC (Kconfig or DT "zephyr,random-mac-address"),
 * then a full DT-provided address, then the Kconfig static octets, and
 * finally the ST OUI plus 3 bytes of the unique device ID.
 */
static void generate_mac(uint8_t *mac_addr)
{
#if defined(ETH_STM32_RANDOM_MAC)
	/* Either CONFIG_ETH_STM32_HAL_RANDOM_MAC or device tree property */
	/* "zephyr,random-mac-address" is set, generate a random mac address */
	gen_random_mac(mac_addr, ST_OUI_B0, ST_OUI_B1, ST_OUI_B2);
#else /* Use user defined mac address */
	mac_addr[0] = ST_OUI_B0;
	mac_addr[1] = ST_OUI_B1;
	mac_addr[2] = ST_OUI_B2;
#if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))
	/* Full MAC provided via devicetree: take the low 3 octets from it */
	mac_addr[3] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 3);
	mac_addr[4] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 4);
	mac_addr[5] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 5);
#elif defined(CONFIG_ETH_STM32_HAL_USER_STATIC_MAC)
	mac_addr[3] = CONFIG_ETH_STM32_HAL_MAC3;
	mac_addr[4] = CONFIG_ETH_STM32_HAL_MAC4;
	mac_addr[5] = CONFIG_ETH_STM32_HAL_MAC5;
#else
	/* Nothing defined by the user, use device id */
	/* NOTE(review): return value ignored — on failure the low octets
	 * stay uninitialized; confirm hwinfo is always available here.
	 */
	hwinfo_get_device_id(&mac_addr[3], 3);
#endif /* NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))) */
#endif
}
|
2020-02-11 18:50:11 +01:00
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/*
 * Device init hook: bring up clocks and pinmux, initialize the ETH HAL,
 * configure PTP timestamping and TX defaults where enabled, create the RX
 * thread, start the MAC, and program the MAC address filter.
 *
 * Returns 0 on success, -ENODEV/-EIO/-EINVAL or a pinctrl error otherwise.
 */
static int eth_initialize(const struct device *dev)
{
	struct eth_stm32_hal_dev_data *dev_data;
	const struct eth_stm32_hal_dev_cfg *cfg;
	ETH_HandleTypeDef *heth;
	HAL_StatusTypeDef hal_ret = HAL_OK;
	int ret = 0;

	__ASSERT_NO_MSG(dev != NULL);

	dev_data = dev->data;
	cfg = dev->config;

	__ASSERT_NO_MSG(dev_data != NULL);
	__ASSERT_NO_MSG(cfg != NULL);

	dev_data->clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);

	if (!device_is_ready(dev_data->clock)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	/* enable clock */
	/* Errors are OR-ed together and checked once below */
	ret = clock_control_on(dev_data->clock,
		(clock_control_subsys_t *)&cfg->pclken);
	ret |= clock_control_on(dev_data->clock,
		(clock_control_subsys_t *)&cfg->pclken_tx);
	ret |= clock_control_on(dev_data->clock,
		(clock_control_subsys_t *)&cfg->pclken_rx);
#if DT_INST_CLOCKS_HAS_NAME(0, mac_clk_ptp)
	ret |= clock_control_on(dev_data->clock,
		(clock_control_subsys_t *)&cfg->pclken_ptp);
#endif

	if (ret) {
		LOG_ERR("Failed to enable ethernet clock");
		return -EIO;
	}

	/* configure pinmux */
	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("Could not configure ethernet pins");
		return ret;
	}

	heth = &dev_data->heth;

	generate_mac(dev_data->mac_addr);

	heth->Init.MACAddr = dev_data->mac_addr;

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
	/* New HAL API: hand the descriptor tables and RX buffer size to Init */
	heth->Init.TxDesc = dma_tx_desc_tab;
	heth->Init.RxDesc = dma_rx_desc_tab;
	heth->Init.RxBuffLen = ETH_STM32_RX_BUF_SIZE;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_ETH_STM32_HAL_API_V2 */

	hal_ret = HAL_ETH_Init(heth);
	if (hal_ret == HAL_TIMEOUT) {
		/* HAL Init time out. This could be linked to */
		/* a recoverable error. Log the issue and continue */
		/* driver initialisation */
		LOG_ERR("HAL_ETH_Init Timed out");
	} else if (hal_ret != HAL_OK) {
		LOG_ERR("HAL_ETH_Init failed: %d", hal_ret);
		return -EINVAL;
	}

#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
	/* Enable timestamping of RX packets. We enable all packets to be
	 * timestamped to cover both IEEE 1588 and gPTP.
	 */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACTSCR |= ETH_MACTSCR_TSENALL;
#else
	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSARFE;
#endif /* CONFIG_SOC_SERIES_STM32H7X */
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
	/* Tx config init: */
	/* Offload checksum insertion and CRC/padding to the MAC */
	memset(&tx_config, 0, sizeof(ETH_TxPacketConfig));
	tx_config.Attributes = ETH_TX_PACKETS_FEATURES_CSUM |
				ETH_TX_PACKETS_FEATURES_CRCPAD;
	tx_config.ChecksumCtrl = ETH_CHECKSUM_IPHDR_PAYLOAD_INSERT_PHDR_CALC;
	tx_config.CRCPadCtrl = ETH_CRC_PAD_INSERT;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_ETH_STM32_HAL_API_V2 */

	dev_data->link_up = false;

	/* Initialize semaphores */
	k_mutex_init(&dev_data->tx_mutex);
	k_sem_init(&dev_data->rx_int_sem, 0, K_SEM_MAX_LIMIT);
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
	k_sem_init(&dev_data->tx_int_sem, 0, K_SEM_MAX_LIMIT);
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_ETH_STM32_HAL_API_V2 */

	/* Start interruption-poll thread */
	k_thread_create(&dev_data->rx_thread, dev_data->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(dev_data->rx_thread_stack),
			rx_thread, (void *) dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_ETH_STM32_HAL_RX_THREAD_PRIO),
			0, K_NO_WAIT);

	k_thread_name_set(&dev_data->rx_thread, "stm_eth");

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	/* prepare tx buffer header */
	for (uint16_t i = 0; i < ETH_TXBUFNB; ++i) {
		dma_tx_buffer_header[i].tx_buff.buffer = dma_tx_buffer[i];
	}

	hal_ret = HAL_ETH_Start_IT(heth);
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
	/* Attach one static RX buffer to each DMA descriptor */
	for (uint32_t i = 0; i < ETH_RX_DESC_CNT; i++) {
		hal_ret = HAL_ETH_DescAssignMemory(heth, i, dma_rx_buffer[i],
						   NULL);
		if (hal_ret != HAL_OK) {
			LOG_ERR("HAL_ETH_DescAssignMemory: failed: %d, i: %d",
				hal_ret, i);
			return -EINVAL;
		}
	}

	hal_ret = HAL_ETH_Start_IT(heth);
#else
	/* Legacy HAL API: chain descriptor lists to the static buffers */
	HAL_ETH_DMATxDescListInit(heth, dma_tx_desc_tab,
		&dma_tx_buffer[0][0], ETH_TXBUFNB);
	HAL_ETH_DMARxDescListInit(heth, dma_rx_desc_tab,
		&dma_rx_buffer[0][0], ETH_RXBUFNB);

	hal_ret = HAL_ETH_Start(heth);
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */

	if (hal_ret != HAL_OK) {
		LOG_ERR("HAL_ETH_Start{_IT} failed");
	}

	setup_mac_filter(heth);

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_ETH_STM32_HAL_API_V2)
	/* Adjust MDC clock range depending on HCLK frequency: */
	HAL_ETH_SetMDIOClockRange(heth);

	/* @TODO: read duplex mode and speed from PHY and set it to ETH */

	ETH_MACConfigTypeDef mac_config;

	HAL_ETH_GetMACConfig(heth, &mac_config);
	mac_config.DuplexMode = ETH_FULLDUPLEX_MODE;
	mac_config.Speed = ETH_SPEED_100M;
	HAL_ETH_SetMACConfig(heth, &mac_config);
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_ETH_STM32_HAL_API_V2 */

	LOG_DBG("MAC %02x:%02x:%02x:%02x:%02x:%02x",
		dev_data->mac_addr[0], dev_data->mac_addr[1],
		dev_data->mac_addr[2], dev_data->mac_addr[3],
		dev_data->mac_addr[4], dev_data->mac_addr[5]);

	return 0;
}
|
|
|
|
|
2023-01-17 06:31:15 +01:00
|
|
|
#if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV6)
|
|
|
|
static void add_ipv6_multicast_addr(const struct in6_addr *addr)
|
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
|
|
|
|
if (net_ipv6_is_addr_unspecified(&multicast_ipv6_joined_addrs[i])) {
|
|
|
|
net_ipv6_addr_copy_raw((uint8_t *)&multicast_ipv6_joined_addrs[i],
|
|
|
|
(uint8_t *)addr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void remove_ipv6_multicast_addr(const struct in6_addr *addr)
|
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
|
|
|
|
if (net_ipv6_addr_cmp_raw(&multicast_ipv6_joined_addrs[i], addr)) {
|
|
|
|
net_ipv6_addr_copy_raw((uint8_t *)&multicast_ipv6_joined_addrs[i],
|
|
|
|
(uint8_t *)net_ipv6_unspecified_address);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_NATIVE_IPV6 */
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV4)
|
|
|
|
static void add_ipv4_multicast_addr(const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
|
|
|
|
if (net_ipv4_is_addr_unspecified(&multicast_ipv4_joined_addrs[i])) {
|
|
|
|
net_ipv4_addr_copy_raw((uint8_t *)&multicast_ipv4_joined_addrs[i],
|
|
|
|
(uint8_t *)addr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void remove_ipv4_multicast_addr(const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
|
|
|
|
if (net_ipv4_addr_cmp_raw((uint8_t *)&multicast_ipv4_joined_addrs[i],
|
|
|
|
(uint8_t *)addr)) {
|
|
|
|
multicast_ipv4_joined_addrs[i].s_addr = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NET_NATIVE_IPV4 */
|
|
|
|
|
2021-10-04 11:44:47 +02:00
|
|
|
/*
 * Reverse the bit order of a 32-bit word (bit 0 <-> bit 31, etc.).
 *
 * Used to put the CRC-32 of a multicast MAC address into the bit order
 * expected by the MAC hash-filter registers.
 *
 * Fix: use unsigned shift operands — "1 << 31" (and "1 << (31 - i)" at
 * i == 0) left-shifts a signed int into the sign bit, which is undefined
 * behavior in C.
 */
static uint32_t reverse(uint32_t val)
{
	uint32_t res = 0;
	int i;

	for (i = 0; i < 32; i++) {
		if (val & (1U << i)) {
			res |= 1U << (31 - i);
		}
	}

	return res;
}
|
|
|
|
|
2023-01-17 06:31:15 +01:00
|
|
|
/*
 * Multicast join/leave monitor callback.
 *
 * Invoked by the network stack whenever this interface joins or leaves an
 * IPv4/IPv6 multicast group. Recomputes the 64-bit MAC multicast hash
 * filter: on join, only the bit for the new address is added to the
 * current hardware table; on leave, the table is rebuilt from all
 * addresses still recorded in multicast_ipv*_joined_addrs.
 */
static void net_if_stm32_mcast_cb(struct net_if *iface,
				  const struct net_addr *addr,
				  bool is_joined)
{
	/* addr IS used below when IPv4/IPv6 support is compiled in; this
	 * only silences the warning when both are disabled.
	 */
	ARG_UNUSED(addr);

	const struct device *dev;
	struct eth_stm32_hal_dev_data *dev_data;
	ETH_HandleTypeDef *heth;
	struct net_eth_addr mac_addr;
	uint32_t crc;
	uint32_t hash_table[2];
	uint32_t hash_index;
	int i;

	dev = net_if_get_device(iface);

	dev_data = (struct eth_stm32_hal_dev_data *)dev->data;

	heth = &dev_data->heth;

	hash_table[0] = 0;
	hash_table[1] = 0;

	if (is_joined) {
		/* Save a copy of the hash table which we update with
		 * the hash for a single multicast address for join
		 */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
		hash_table[0] = heth->Instance->MACHT0R;
		hash_table[1] = heth->Instance->MACHT1R;
#else
		hash_table[0] = heth->Instance->MACHTLR;
		hash_table[1] = heth->Instance->MACHTHR;
#endif /* CONFIG_SOC_SERIES_STM32H7X */
	}

	/* Serialize access to the joined-address bookkeeping arrays */
	k_mutex_lock(&multicast_addr_lock, K_FOREVER);

#if defined(CONFIG_NET_NATIVE_IPV6)
	if (is_joined) {
		/* When joining only update the hash filter with the joining
		 * multicast address.
		 */
		add_ipv6_multicast_addr(&addr->in6_addr);

		net_eth_ipv6_mcast_to_mac_addr(&addr->in6_addr, &mac_addr);
		/* Hash index = top 6 bits of the bit-reversed CRC-32 of the
		 * multicast MAC address.
		 */
		crc = reverse(crc32_ieee(mac_addr.addr,
					 sizeof(struct net_eth_addr)));
		hash_index = (crc >> 26) & 0x3f;
		hash_table[hash_index / 32] |= (1 << (hash_index % 32));
	} else {
		/* When leaving its better to compute the full hash table
		 * for all the multicast addresses that we're aware of.
		 */
		remove_ipv6_multicast_addr(&addr->in6_addr);

		for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
			if (net_ipv6_is_addr_unspecified(&multicast_ipv6_joined_addrs[i])) {
				continue;
			}

			net_eth_ipv6_mcast_to_mac_addr(&multicast_ipv6_joined_addrs[i],
						       &mac_addr);
			crc = reverse(crc32_ieee(mac_addr.addr,
						 sizeof(struct net_eth_addr)));
			hash_index = (crc >> 26) & 0x3f;
			hash_table[hash_index / 32] |= (1 << (hash_index % 32));
		}
	}
#endif /* CONFIG_NET_IPV6 */

#if defined(CONFIG_NET_NATIVE_IPV4)
	if (is_joined) {
		/* When joining only update the hash filter with the joining
		 * multicast address.
		 */
		add_ipv4_multicast_addr(&addr->in_addr);

		net_eth_ipv4_mcast_to_mac_addr(&addr->in_addr, &mac_addr);
		crc = reverse(crc32_ieee(mac_addr.addr,
					 sizeof(struct net_eth_addr)));
		hash_index = (crc >> 26) & 0x3f;
		hash_table[hash_index / 32] |= (1 << (hash_index % 32));
	} else {
		/* When leaving its better to compute the full hash table
		 * for all the multicast addresses that we're aware of.
		 */
		remove_ipv4_multicast_addr(&addr->in_addr);

		for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
			if (net_ipv4_is_addr_unspecified(&multicast_ipv4_joined_addrs[i])) {
				continue;
			}

			net_eth_ipv4_mcast_to_mac_addr(&multicast_ipv4_joined_addrs[i],
						       &mac_addr);
			crc = reverse(crc32_ieee(mac_addr.addr,
						 sizeof(struct net_eth_addr)));
			hash_index = (crc >> 26) & 0x3f;
			hash_table[hash_index / 32] |= (1 << (hash_index % 32));
		}
	}
#endif /* CONFIG_NET_IPV4 */

	k_mutex_unlock(&multicast_addr_lock);

	/* Program the recomputed hash table into the MAC filter registers */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACHT0R = hash_table[0];
	heth->Instance->MACHT1R = hash_table[1];
#else
	heth->Instance->MACHTLR = hash_table[0];
	heth->Instance->MACHTHR = hash_table[1];
#endif /* CONFIG_SOC_SERIES_STM32H7X */
}
|
|
|
|
|
2023-01-17 06:31:15 +01:00
|
|
|
#endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
|
|
|
|
|
2020-02-11 18:50:11 +01:00
|
|
|
static void eth_iface_init(struct net_if *iface)
|
|
|
|
{
|
2020-04-30 20:33:38 +02:00
|
|
|
const struct device *dev;
|
2020-02-11 18:50:11 +01:00
|
|
|
struct eth_stm32_hal_dev_data *dev_data;
|
2021-05-12 11:34:20 +02:00
|
|
|
bool is_first_init = false;
|
2020-02-11 18:50:11 +01:00
|
|
|
|
|
|
|
__ASSERT_NO_MSG(iface != NULL);
|
|
|
|
|
|
|
|
dev = net_if_get_device(iface);
|
|
|
|
__ASSERT_NO_MSG(dev != NULL);
|
|
|
|
|
2022-01-18 15:48:18 +01:00
|
|
|
dev_data = dev->data;
|
2020-02-11 18:50:11 +01:00
|
|
|
__ASSERT_NO_MSG(dev_data != NULL);
|
|
|
|
|
|
|
|
/* For VLAN, this value is only used to get the correct L2 driver.
|
|
|
|
* The iface pointer in context should contain the main interface
|
|
|
|
* if the VLANs are enabled.
|
|
|
|
*/
|
|
|
|
if (dev_data->iface == NULL) {
|
|
|
|
dev_data->iface = iface;
|
2021-05-12 11:34:20 +02:00
|
|
|
is_first_init = true;
|
2020-02-11 18:50:11 +01:00
|
|
|
}
|
|
|
|
|
2023-01-17 06:31:15 +01:00
|
|
|
#if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
|
|
|
|
net_if_mcast_mon_register(&mcast_monitor, iface, net_if_stm32_mcast_cb);
|
|
|
|
#endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
|
2021-10-04 11:44:47 +02:00
|
|
|
|
2017-06-23 13:03:51 +02:00
|
|
|
/* Register Ethernet MAC Address with the upper layer */
|
|
|
|
net_if_set_link_addr(iface, dev_data->mac_addr,
|
|
|
|
sizeof(dev_data->mac_addr),
|
|
|
|
NET_LINK_ETHERNET);
|
2018-07-02 20:52:52 +02:00
|
|
|
|
|
|
|
ethernet_init(iface);
|
2020-02-11 18:50:11 +01:00
|
|
|
|
2022-09-30 14:53:44 +02:00
|
|
|
net_if_carrier_off(iface);
|
2021-05-12 11:34:20 +02:00
|
|
|
|
2022-11-11 12:48:01 +01:00
|
|
|
net_lldp_set_lldpdu(iface);
|
|
|
|
|
2021-05-12 11:34:20 +02:00
|
|
|
if (is_first_init) {
|
2022-01-18 15:48:18 +01:00
|
|
|
const struct eth_stm32_hal_dev_cfg *cfg = dev->config;
|
2021-05-12 11:34:20 +02:00
|
|
|
/* Now that the iface is setup, we are safe to enable IRQs. */
|
2022-01-18 15:48:18 +01:00
|
|
|
__ASSERT_NO_MSG(cfg->config_func != NULL);
|
|
|
|
cfg->config_func();
|
2021-05-12 11:34:20 +02:00
|
|
|
}
|
2017-06-23 13:03:51 +02:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static enum ethernet_hw_caps eth_stm32_hal_get_capabilities(const struct device *dev)
|
2018-03-27 12:29:33 +02:00
|
|
|
{
|
|
|
|
ARG_UNUSED(dev);
|
|
|
|
|
2020-02-11 18:50:11 +01:00
|
|
|
return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T
|
|
|
|
#if defined(CONFIG_NET_VLAN)
|
|
|
|
| ETHERNET_HW_VLAN
|
2021-10-06 12:53:31 +02:00
|
|
|
#endif
|
|
|
|
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
|
|
|
|
| ETHERNET_PROMISC_MODE
|
2021-09-13 09:24:53 +02:00
|
|
|
#endif
|
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
|
|
|
| ETHERNET_PTP
|
2022-11-11 12:48:01 +01:00
|
|
|
#endif
|
|
|
|
#if defined(CONFIG_NET_LLDP)
|
|
|
|
| ETHERNET_LLDP
|
2022-06-16 15:39:30 +02:00
|
|
|
#endif
|
|
|
|
#if defined(CONFIG_ETH_STM32_HW_CHECKSUM)
|
|
|
|
| ETHERNET_HW_RX_CHKSUM_OFFLOAD
|
|
|
|
| ETHERNET_HW_TX_CHKSUM_OFFLOAD
|
2020-02-11 18:50:11 +01:00
|
|
|
#endif
|
|
|
|
;
|
2018-03-27 12:29:33 +02:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int eth_stm32_hal_set_config(const struct device *dev,
|
2019-09-19 14:19:16 +02:00
|
|
|
enum ethernet_config_type type,
|
|
|
|
const struct ethernet_config *config)
|
|
|
|
{
|
2021-10-06 12:53:31 +02:00
|
|
|
int ret = -ENOTSUP;
|
2019-09-19 14:19:16 +02:00
|
|
|
struct eth_stm32_hal_dev_data *dev_data;
|
|
|
|
ETH_HandleTypeDef *heth;
|
|
|
|
|
2022-01-18 15:48:18 +01:00
|
|
|
dev_data = dev->data;
|
2021-10-06 12:53:31 +02:00
|
|
|
heth = &dev_data->heth;
|
|
|
|
|
2019-09-19 14:19:16 +02:00
|
|
|
switch (type) {
|
|
|
|
case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
|
|
|
|
memcpy(dev_data->mac_addr, config->mac_address.addr, 6);
|
|
|
|
heth->Instance->MACA0HR = (dev_data->mac_addr[5] << 8) |
|
|
|
|
dev_data->mac_addr[4];
|
|
|
|
heth->Instance->MACA0LR = (dev_data->mac_addr[3] << 24) |
|
|
|
|
(dev_data->mac_addr[2] << 16) |
|
|
|
|
(dev_data->mac_addr[1] << 8) |
|
|
|
|
dev_data->mac_addr[0];
|
2020-11-05 16:14:06 +01:00
|
|
|
net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
|
|
|
|
sizeof(dev_data->mac_addr),
|
|
|
|
NET_LINK_ETHERNET);
|
2021-10-06 12:53:31 +02:00
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
|
|
|
|
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
|
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
|
|
|
if (config->promisc_mode) {
|
|
|
|
heth->Instance->MACPFR |= ETH_MACPFR_PR;
|
|
|
|
} else {
|
|
|
|
heth->Instance->MACPFR &= ~ETH_MACPFR_PR;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
if (config->promisc_mode) {
|
|
|
|
heth->Instance->MACFFR |= ETH_MACFFR_PM;
|
|
|
|
} else {
|
|
|
|
heth->Instance->MACFFR &= ~ETH_MACFFR_PM;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
|
|
|
ret = 0;
|
|
|
|
#endif /* CONFIG_NET_PROMISCUOUS_MODE */
|
|
|
|
break;
|
2019-09-19 14:19:16 +02:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-03-08 14:25:18 +01:00
|
|
|
return ret;
|
2019-09-19 14:19:16 +02:00
|
|
|
}
|
|
|
|
|
2021-09-13 09:24:53 +02:00
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
/** Return the PTP clock device bound to this MAC (set in ptp_stm32_init). */
static const struct device *eth_stm32_get_ptp_clock(const struct device *dev)
{
	return ((struct eth_stm32_hal_dev_data *)dev->data)->ptp_clock;
}
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|
|
|
|
|
2018-03-27 12:29:33 +02:00
|
|
|
/* Ethernet L2 driver API hooks exposed by this driver. */
static const struct ethernet_api eth_api = {
	.iface_api.init = eth_iface_init,
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
	.get_ptp_clock = eth_stm32_get_ptp_clock,
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
	.get_capabilities = eth_stm32_hal_get_capabilities,
	.set_config = eth_stm32_hal_set_config,
	.send = eth_tx,
};
|
|
|
|
|
|
|
|
/* Connect and enable the Ethernet interrupt for instance 0.
 * Invoked once from eth_iface_init() through the config_func pointer
 * stored in eth0_config.
 */
static void eth0_irq_config(void)
{
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN(0));
}
|
|
|
|
|
2021-12-23 12:33:03 +01:00
|
|
|
PINCTRL_DT_INST_DEFINE(0);

/* Immutable (ROM) configuration for Ethernet instance 0, derived from
 * the devicetree: IRQ setup hook, clock gating descriptors and the
 * pin-control state.
 */
static const struct eth_stm32_hal_dev_cfg eth0_config = {
	.config_func = eth0_irq_config,
	.pclken = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bus),
		   .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bits)},
	.pclken_tx = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bus),
		      .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bits)},
	.pclken_rx = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bus),
		      .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bits)},
#if DT_INST_CLOCKS_HAS_NAME(0, mac_clk_ptp)
	/* Optional PTP reference clock; only present in some devicetrees */
	.pclken_ptp = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_ptp, bus),
		       .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_ptp, bits)},
#endif
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};
|
|
|
|
|
|
|
|
/* Mutable (RAM) state for Ethernet instance 0, including the ST HAL
 * handle and its compile-time Init configuration.
 */
static struct eth_stm32_hal_dev_data eth0_data = {
	.heth = {
		.Instance = (ETH_TypeDef *)DT_INST_REG_ADDR(0),
		.Init = {
/* The legacy (non-H7, non-V2) HAL takes speed/duplex/checksum settings
 * through the Init struct; newer HALs configure these elsewhere.
 */
#if !defined(CONFIG_SOC_SERIES_STM32H7X) && !defined(CONFIG_ETH_STM32_HAL_API_V2)
#if defined(CONFIG_ETH_STM32_AUTO_NEGOTIATION_ENABLE)
			.AutoNegotiation = ETH_AUTONEGOTIATION_ENABLE,
#else
			.AutoNegotiation = ETH_AUTONEGOTIATION_DISABLE,
			/* Fixed speed/duplex only apply when autoneg is off */
			.Speed = IS_ENABLED(CONFIG_ETH_STM32_SPEED_10M) ?
				 ETH_SPEED_10M : ETH_SPEED_100M,
			.DuplexMode = IS_ENABLED(CONFIG_ETH_STM32_MODE_HALFDUPLEX) ?
				      ETH_MODE_HALFDUPLEX : ETH_MODE_FULLDUPLEX,
#endif /* !CONFIG_ETH_STM32_AUTO_NEGOTIATION_ENABLE */
			.PhyAddress = PHY_ADDR,
			.RxMode = ETH_RXINTERRUPT_MODE,
			.ChecksumMode = IS_ENABLED(CONFIG_ETH_STM32_HW_CHECKSUM) ?
					ETH_CHECKSUM_BY_HARDWARE : ETH_CHECKSUM_BY_SOFTWARE,
#endif /* !CONFIG_SOC_SERIES_STM32H7X */
			.MediaInterface = IS_ENABLED(CONFIG_ETH_STM32_HAL_MII) ?
					  ETH_MEDIA_INTERFACE_MII : ETH_MEDIA_INTERFACE_RMII,
		},
	},
};
|
|
|
|
|
2020-12-16 18:17:24 +01:00
|
|
|
/* Register Ethernet device instance 0 with the network stack. */
ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_initialize,
			      NULL, &eth0_data, &eth0_config,
			      CONFIG_ETH_INIT_PRIORITY, &eth_api, ETH_STM32_HAL_MTU);
|
2021-09-13 09:24:53 +02:00
|
|
|
|
|
|
|
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
|
|
|
|
|
|
|
|
/* Per-PTP-clock-device context: links the clock device back to the
 * Ethernet MAC driver data it belongs to.
 */
struct ptp_context {
	struct eth_stm32_hal_dev_data *eth_dev_data;
};

static struct ptp_context ptp_stm32_0_context;
|
|
|
|
|
|
|
|
/**
 * Set the PTP hardware clock to the given absolute time.
 *
 * Loads the seconds/nanoseconds update registers and triggers the
 * "initialize timestamp" control bit, then busy-waits (with interrupts
 * locked) until the hardware clears the bit.
 *
 * Always returns 0.
 */
static int ptp_clock_stm32_set(const struct device *dev,
			       struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
	unsigned int key;

	/* Lock interrupts so the multi-register update is not interleaved */
	key = irq_lock();

#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACSTSUR = tm->second;
	heth->Instance->MACSTNUR = tm->nanosecond;
	heth->Instance->MACTSCR |= ETH_MACTSCR_TSINIT;
	while (heth->Instance->MACTSCR & ETH_MACTSCR_TSINIT_Msk) {
		/* spin lock */
	}
#else
	heth->Instance->PTPTSHUR = tm->second;
	heth->Instance->PTPTSLUR = tm->nanosecond;
	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTI;
	while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTI_Msk) {
		/* spin lock */
	}
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	irq_unlock(key);

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * Read the current PTP hardware clock time.
 *
 * Seconds and nanoseconds cannot be latched atomically, so the seconds
 * register is sampled twice. If a second rollover happened between the
 * two reads (detected via the nanosecond value lying in the first half
 * of a second), the later seconds sample is used.
 *
 * Always returns 0.
 */
static int ptp_clock_stm32_get(const struct device *dev,
			       struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
	unsigned int key;
	uint32_t second_2;

	key = irq_lock();

#if defined(CONFIG_SOC_SERIES_STM32H7X)
	tm->second = heth->Instance->MACSTSR;
	tm->nanosecond = heth->Instance->MACSTNR;
	second_2 = heth->Instance->MACSTSR;
#else
	tm->second = heth->Instance->PTPTSHR;
	tm->nanosecond = heth->Instance->PTPTSLR;
	second_2 = heth->Instance->PTPTSHR;
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	irq_unlock(key);

	if (tm->second != second_2 && tm->nanosecond < NSEC_PER_SEC / 2) {
		/* Second rollover has happened during first measurement: second register
		 * was read before second boundary and nanosecond register was read after.
		 * We will use second_2 as a new second value.
		 */
		tm->second = second_2;
	}

	return 0;
}
|
|
|
|
|
|
|
|
static int ptp_clock_stm32_adjust(const struct device *dev, int increment)
|
|
|
|
{
|
|
|
|
struct ptp_context *ptp_context = dev->data;
|
|
|
|
struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
|
|
|
|
ETH_HandleTypeDef *heth = ð_dev_data->heth;
|
|
|
|
int key, ret;
|
|
|
|
|
|
|
|
if ((increment <= (int32_t)(-NSEC_PER_SEC)) ||
|
|
|
|
(increment >= (int32_t)NSEC_PER_SEC)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
} else {
|
|
|
|
key = irq_lock();
|
|
|
|
|
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
|
|
|
heth->Instance->MACSTSUR = 0;
|
|
|
|
if (increment >= 0) {
|
|
|
|
heth->Instance->MACSTNUR = increment;
|
|
|
|
} else {
|
|
|
|
heth->Instance->MACSTNUR = ETH_MACSTNUR_ADDSUB | (NSEC_PER_SEC + increment);
|
|
|
|
}
|
|
|
|
heth->Instance->MACTSCR |= ETH_MACTSCR_TSUPDT;
|
|
|
|
while (heth->Instance->MACTSCR & ETH_MACTSCR_TSUPDT_Msk) {
|
|
|
|
/* spin lock */
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
heth->Instance->PTPTSHUR = 0;
|
|
|
|
if (increment >= 0) {
|
|
|
|
heth->Instance->PTPTSLUR = increment;
|
|
|
|
} else {
|
|
|
|
heth->Instance->PTPTSLUR = ETH_PTPTSLUR_TSUPNS | (-increment);
|
|
|
|
}
|
|
|
|
heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTU;
|
|
|
|
while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTU_Msk) {
|
|
|
|
/* spin lock */
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
irq_unlock(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-01-27 09:49:20 +01:00
|
|
|
static int ptp_clock_stm32_rate_adjust(const struct device *dev, double ratio)
|
2021-09-13 09:24:53 +02:00
|
|
|
{
|
|
|
|
struct ptp_context *ptp_context = dev->data;
|
|
|
|
struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
|
|
|
|
ETH_HandleTypeDef *heth = ð_dev_data->heth;
|
|
|
|
int key, ret;
|
|
|
|
uint32_t addend_val;
|
|
|
|
|
|
|
|
/* No change needed */
|
|
|
|
if (ratio == 1.0f) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
key = irq_lock();
|
|
|
|
|
|
|
|
ratio *= eth_dev_data->clk_ratio_adj;
|
|
|
|
|
|
|
|
/* Limit possible ratio */
|
|
|
|
if (ratio * 100 < CONFIG_ETH_STM32_HAL_PTP_CLOCK_ADJ_MIN_PCT ||
|
|
|
|
ratio * 100 > CONFIG_ETH_STM32_HAL_PTP_CLOCK_ADJ_MAX_PCT) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Save new ratio */
|
|
|
|
eth_dev_data->clk_ratio_adj = ratio;
|
|
|
|
|
|
|
|
/* Update addend register */
|
|
|
|
addend_val = UINT32_MAX * eth_dev_data->clk_ratio * ratio;
|
|
|
|
|
|
|
|
#if defined(CONFIG_SOC_SERIES_STM32H7X)
|
|
|
|
heth->Instance->MACTSAR = addend_val;
|
|
|
|
heth->Instance->MACTSCR |= ETH_MACTSCR_TSADDREG;
|
|
|
|
while (heth->Instance->MACTSCR & ETH_MACTSCR_TSADDREG_Msk) {
|
|
|
|
/* spin lock */
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
heth->Instance->PTPTSAR = addend_val;
|
|
|
|
heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSARU;
|
|
|
|
while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU_Msk) {
|
|
|
|
/* spin lock */
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SOC_SERIES_STM32H7X */
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
error:
|
|
|
|
irq_unlock(key);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PTP clock driver API hooks for the MAC-embedded clock. */
static const struct ptp_clock_driver_api api = {
	.set = ptp_clock_stm32_set,
	.get = ptp_clock_stm32_get,
	.adjust = ptp_clock_stm32_adjust,
	.rate_adjust = ptp_clock_stm32_rate_adjust,
};
|
|
|
|
|
|
|
|
/**
 * Initialize the PTP hardware clock embedded in the Ethernet MAC.
 *
 * Links the PTP device with the MAC driver data, masks the timestamp
 * trigger interrupt, enables timestamping, programs the subsecond
 * increment and addend registers from the configured PTP source clock
 * frequency, enables fine correction and nanosecond rollover, and
 * resets the timestamp to zero.
 *
 * @return 0 on success, -EIO if the ethernet clock rate cannot be
 *         queried, -EINVAL for an unusable PTP clock frequency.
 */
static int ptp_stm32_init(const struct device *port)
{
	/* The PTP clock lives inside the MAC; fetch the MAC device */
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(mac));
	struct eth_stm32_hal_dev_data *eth_dev_data = dev->data;
	const struct eth_stm32_hal_dev_cfg *eth_cfg = dev->config;
	struct ptp_context *ptp_context = port->data;
	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
	int ret;
	uint32_t ptp_hclk_rate;
	uint32_t ss_incr_ns;
	uint32_t addend_val;

	/* Cross-link MAC data and PTP context */
	eth_dev_data->ptp_clock = port;
	ptp_context->eth_dev_data = eth_dev_data;

	/* Mask the Timestamp Trigger interrupt */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACIER &= ~(ETH_MACIER_TSIE);
#else
	heth->Instance->MACIMR &= ~(ETH_MACIMR_TSTIM);
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* Enable timestamping */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACTSCR |= ETH_MACTSCR_TSENA;
#else
	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSE;
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* Query ethernet clock rate */
	ret = clock_control_get_rate(eth_dev_data->clock,
#if defined(CONFIG_SOC_SERIES_STM32H7X)
				     (clock_control_subsys_t *)&eth_cfg->pclken,
#else
				     (clock_control_subsys_t *)&eth_cfg->pclken_ptp,
#endif /* CONFIG_SOC_SERIES_STM32H7X */
				     &ptp_hclk_rate);
	if (ret) {
		LOG_ERR("Failed to query ethernet clock");
		return -EIO;
	}

	/* Program the subsecond increment register based on the PTP clock freq */
	if (NSEC_PER_SEC % CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ != 0) {
		LOG_ERR("PTP clock period must be an integer nanosecond value");
		return -EINVAL;
	}
	ss_incr_ns = NSEC_PER_SEC / CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ;
	if (ss_incr_ns > UINT8_MAX) {
		LOG_ERR("PTP clock period is more than %d nanoseconds", UINT8_MAX);
		return -EINVAL;
	}
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACSSIR = ss_incr_ns << ETH_MACMACSSIR_SSINC_Pos;
#else
	heth->Instance->PTPSSIR = ss_incr_ns;
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* Program timestamp addend register */
	eth_dev_data->clk_ratio =
		((double)CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ) / ((double)ptp_hclk_rate);
	/*
	 * clk_ratio is a ratio between desired PTP clock frequency and HCLK rate.
	 * Because HCLK is defined by a physical oscillator, it might drift due
	 * to manufacturing tolerances and environmental effects (e.g. temperature).
	 * clk_ratio_adj compensates for such inaccuracies. It starts off as 1.0
	 * and gets adjusted by calling ptp_clock_stm32_rate_adjust().
	 */
	eth_dev_data->clk_ratio_adj = 1.0f;
	addend_val =
		UINT32_MAX * eth_dev_data->clk_ratio * eth_dev_data->clk_ratio_adj;
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACTSAR = addend_val;
	heth->Instance->MACTSCR |= ETH_MACTSCR_TSADDREG;
	while (heth->Instance->MACTSCR & ETH_MACTSCR_TSADDREG_Msk) {
		/* Not yet latched by HW; yield instead of busy spinning */
		k_yield();
	}
#else
	heth->Instance->PTPTSAR = addend_val;
	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSARU;
	while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU_Msk) {
		k_yield();
	}
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* Enable fine timestamp correction method */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACTSCR |= ETH_MACTSCR_TSCFUPDT;
#else
	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSFCU;
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* Enable nanosecond rollover into a new second */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACTSCR |= ETH_MACTSCR_TSCTRLSSR;
#else
	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSSR;
#endif /* CONFIG_SOC_SERIES_STM32H7X */

	/* Initialize timestamp */
#if defined(CONFIG_SOC_SERIES_STM32H7X)
	heth->Instance->MACSTSUR = 0;
	heth->Instance->MACSTNUR = 0;
	heth->Instance->MACTSCR |= ETH_MACTSCR_TSINIT;
	while (heth->Instance->MACTSCR & ETH_MACTSCR_TSINIT_Msk) {
		k_yield();
	}
#else
	heth->Instance->PTPTSHUR = 0;
	heth->Instance->PTPTSLUR = 0;
	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTI;
	while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTI_Msk) {
		k_yield();
	}
#endif /* CONFIG_SOC_SERIES_STM32H7X */

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	/* Set PTP Configuration done */
	heth->IsPtpConfigured = HAL_ETH_PTP_CONFIGURATED;
#endif

	return 0;
}
|
|
|
|
|
|
|
|
/* Register the PTP clock as its own device instance, initialized at
 * CONFIG_ETH_STM32_HAL_PTP_CLOCK_INIT_PRIO in POST_KERNEL.
 */
DEVICE_DEFINE(stm32_ptp_clock_0, PTP_CLOCK_NAME, ptp_stm32_init,
	      NULL, &ptp_stm32_0_context, NULL, POST_KERNEL,
	      CONFIG_ETH_STM32_HAL_PTP_CLOCK_INIT_PRIO, &api);
|
|
|
|
|
|
|
|
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */
|