added Xilinx lwIP drivers; outgoing is working, incoming is TODO

Ulrich Mohr 2024-08-19 16:52:10 +02:00
parent fe5629fa85
commit 910553df45
39 changed files with 9937 additions and 7 deletions

View File

@@ -46,15 +46,16 @@ add_executable(${TARGET_NAME})
# lwip
set (LWIP_INCLUDE_DIRS
"${LWIP_DIR}/src/include"
"bsp_z7/lwip"
"bsp_z7/lwip/include"
)
#include(${LWIP_DIR}/src/Filelists.cmake)
include(${LWIP_DIR}/src/Filelists.cmake)
set(lwip_SRCS
${lwipcore_SRCS}
${lwipcore4_SRCS}
${lwipcore6_SRCS}
${LWIP_DIR}/src/netif/slipif.c
${LWIP_DIR}/src/apps/tftp/tftp.c
${lwipnetif_SRCS}
#${LWIP_DIR}/src/netif/slipif.c
#${LWIP_DIR}/src/apps/tftp/tftp.c
)
if(${CMAKE_CROSSCOMPILING})
add_library(lwip ${lwip_SRCS})

View File

@@ -1,6 +1,8 @@
add_subdirectory(freeRTOS)
add_subdirectory(ps7_cortexa9_0)
add_subdirectory(lwip)
add_subdirectory(newlib)
add_subdirectory(hardware)
target_sources(${TARGET_NAME} PRIVATE main.c)
target_sources(${TARGET_NAME} PRIVATE main.c testEth.c)

View File

@@ -0,0 +1 @@
add_subdirectory(netif)

View File

@@ -219,7 +219,7 @@ a lot of data that needs to be copied, this should be set high. */
#define TCPIP_MBOX_SIZE 5
/* ---------- ARP options ---------- */
#define LWIP_ARP 0
#define LWIP_ARP 1
#define ARP_TABLE_SIZE 10
#define ARP_QUEUEING 1
#define ARP_QUEUE_LEN 10

View File

@@ -0,0 +1,88 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XADAPTER_H_
#define __XADAPTER_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "lwipopts.h"
#if !NO_SYS
#include "lwip/sys.h"
#endif
#include "lwip/netif.h"
#include "lwip/ip.h"
#include "netif/xtopology.h"
struct xemac_s {
enum xemac_types type;
int topology_index;
void *state;
#if !NO_SYS
sys_sem_t sem_rx_data_available;
#if defined(__arm__) && !defined(ARMR5)
TimerHandle_t xTimer;
#endif
#endif
};
enum ethernet_link_status {
ETH_LINK_UNDEFINED = 0,
ETH_LINK_UP,
ETH_LINK_DOWN,
ETH_LINK_NEGOTIATING
};
void eth_link_detect(struct netif *netif);
void lwip_raw_init();
int xemacif_input(struct netif *netif);
void xemacif_input_thread(struct netif *netif);
struct netif * xemac_add(struct netif *netif,
ip_addr_t *ipaddr, ip_addr_t *netmask, ip_addr_t *gw,
unsigned char *mac_ethernet_address,
UINTPTR mac_baseaddr);
#if defined (__arm__) || defined (__aarch64__)
void xemacpsif_resetrx_on_no_rxdata(struct netif *netif);
#endif
/* global lwip debug variable used for debugging */
extern int lwip_runtime_debug;
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,137 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __NETIF_XAXIEMACIF_H__
#define __NETIF_XAXIEMACIF_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "xlwipconfig.h"
#include "lwip/netif.h"
#include "netif/etharp.h"
#include "netif/xadapter.h"
#include "xparameters.h"
#include "xstatus.h"
#include "xaxiethernet.h"
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
#include "xllfifo.h"
#elif XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA
#include "xmcdma.h"
#else
#include "xaxidma.h"
#include "xaxidma_hw.h"
#endif
#include "netif/xpqueue.h"
#include "xlwipconfig.h"
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#define AXIDMA_TX_INTR_PRIORITY_SET_IN_GIC 0xA0
#define AXIDMA_RX_INTR_PRIORITY_SET_IN_GIC 0xA0
#define AXIETH_INTR_PRIORITY_SET_IN_GIC 0xA0
#define TRIG_TYPE_RISING_EDGE_SENSITIVE 0x3
#define INTC_DIST_BASE_ADDR XPAR_SCUGIC_0_DIST_BASEADDR
#endif
void xaxiemacif_setmac(u32_t index, u8_t *addr);
u8_t* xaxiemacif_getmac(u32_t index);
err_t xaxiemacif_init(struct netif *netif);
int xaxiemacif_input(struct netif *netif);
unsigned get_IEEE_phy_speed(XAxiEthernet *xaxiemacp);
void enable_sgmii_clock(XAxiEthernet *xaxiemacp);
unsigned configure_IEEE_phy_speed(XAxiEthernet *xaxiemacp, unsigned speed);
unsigned phy_setup_axiemac (XAxiEthernet *xaxiemacp);
/* xaxiemacif_hw.c */
void xaxiemac_error_handler(XAxiEthernet * Temac);
/* structure within each netif, encapsulating all information required for
* using a particular temac instance
*/
typedef struct {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
XLlFifo axififo;
#elif defined(XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA)
XMcdma aximcdma;
#else
XAxiDma axidma;
#endif
XAxiEthernet axi_ethernet;
/* queue to store overflow packets */
pq_queue_t *recv_q;
pq_queue_t *send_q;
/* pointers to memory holding buffer descriptors (used only with SDMA) */
void *rx_bdspace;
void *tx_bdspace;
enum ethernet_link_status eth_link_status;
} xaxiemacif_s;
extern xaxiemacif_s xaxiemacif;
#ifndef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
s32_t xaxiemac_is_tx_space_available(xaxiemacif_s *emac);
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA
s32_t process_sent_bds(XMcdma_ChanCtrl *Tx_Chan);
#else
s32_t process_sent_bds(XAxiDma_BdRing *txring);
#endif
#endif
/* xaxiemacif_dma.c/xaxiemacif_mcdma.c */
#ifndef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA
XStatus init_axi_mcdma(struct xemac_s *xemac);
XStatus axi_mcdma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p);
#else
XStatus init_axi_dma(struct xemac_s *xemac);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
XStatus axidma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p,
u32_t block_till_tx_complete, u32_t *to_block_index);
#else
XStatus axidma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p);
#endif
#endif
#endif
#ifdef __cplusplus
}
#endif
#endif /* __NETIF_XAXIEMACIF_H__ */

View File

@@ -0,0 +1,68 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __NETIF_XEMACLITEIF_H__
#define __NETIF_XEMACLITEIF_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "lwip/netif.h"
#include "netif/etharp.h"
#include "netif/xpqueue.h"
#include "xemaclite.h"
#include "xemaclite_i.h"
#include "xstatus.h"
/* structure within each netif, encapsulating all information required for
* using a particular emaclite instance
*/
typedef struct {
XEmacLite *instance;
/* queue to store overflow packets */
pq_queue_t *recv_q;
pq_queue_t *send_q;
enum ethernet_link_status eth_link_status;
} xemacliteif_s;
void xemacliteif_setmac(u32_t index, u8_t *addr);
u8_t* xemacliteif_getmac(u32_t index);
err_t xemacliteif_init(struct netif *netif);
int xemacliteif_input(struct netif *netif);
#ifdef __cplusplus
}
#endif
#endif /* __NETIF_XEMACLITEIF_H__ */

View File

@@ -0,0 +1,181 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __NETIF_XEMACPSIF_H__
#define __NETIF_XEMACPSIF_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "xlwipconfig.h"
#include "lwip/netif.h"
#include "netif/etharp.h"
#include "lwip/sys.h"
#include "netif/xadapter.h"
#include "xstatus.h"
#include "sleep.h"
#include "xparameters.h"
#include "xparameters_ps.h" /* defines XPAR values */
#include "xil_types.h"
#include "xil_assert.h"
#include "xil_io.h"
#include "xil_exception.h"
#include "xpseudo_asm.h"
#include "xil_cache.h"
#include "xil_printf.h"
#include "xscugic.h"
#include "xemacps.h" /* defines XEmacPs API */
#include "netif/xpqueue.h"
#include "xlwipconfig.h"
#if defined (__aarch64__) && (EL1_NONSECURE == 1)
#include "xil_smc.h"
#endif
#define ZYNQ_EMACPS_0_BASEADDR 0xE000B000
#define ZYNQ_EMACPS_1_BASEADDR 0xE000C000
#define ZYNQMP_EMACPS_0_BASEADDR 0xFF0B0000
#define ZYNQMP_EMACPS_1_BASEADDR 0xFF0C0000
#define ZYNQMP_EMACPS_2_BASEADDR 0xFF0D0000
#define ZYNQMP_EMACPS_3_BASEADDR 0xFF0E0000
#define CRL_APB_GEM0_REF_CTRL 0xFF5E0050
#define CRL_APB_GEM1_REF_CTRL 0xFF5E0054
#define CRL_APB_GEM2_REF_CTRL 0xFF5E0058
#define CRL_APB_GEM3_REF_CTRL 0xFF5E005C
#define CRL_APB_GEM_DIV0_MASK 0x00003F00
#define CRL_APB_GEM_DIV0_SHIFT 8
#define CRL_APB_GEM_DIV1_MASK 0x003F0000
#define CRL_APB_GEM_DIV1_SHIFT 16
#define VERSAL_EMACPS_0_BASEADDR 0xFF0C0000
#define VERSAL_EMACPS_1_BASEADDR 0xFF0D0000
#define VERSAL_CRL_GEM0_REF_CTRL 0xFF5E0118
#define VERSAL_CRL_GEM1_REF_CTRL 0xFF5E011C
#define VERSAL_CRL_GEM_DIV_MASK 0x0003FF00
#define VERSAL_CRL_APB_GEM_DIV_SHIFT 8
#define VERSAL_NET_EMACPS_0_BASEADDR 0xF19E0000
#define VERSAL_NET_EMACPS_1_BASEADDR 0xF19F0000
#ifdef XPAR_PSX_CRL_0_S_AXI_BASEADDR
#define VERSAL_NET_CRL_GEM0_REF_CTRL ( XPAR_PSX_CRL_0_S_AXI_BASEADDR + 0x118)
#define VERSAL_NET_CRL_GEM1_REF_CTRL ( XPAR_PSX_CRL_0_S_AXI_BASEADDR + 0x11C)
#endif
#define VERSAL_NET_CRL_GEM_DIV_MASK VERSAL_CRL_GEM_DIV_MASK
#define VERSAL_NET_CRL_APB_GEM_DIV_SHIFT VERSAL_CRL_APB_GEM_DIV_SHIFT
#if defined (ARMR5) || (__aarch64__) || (ARMA53_32) || (__MICROBLAZE__)
#if defined (USE_JUMBO_FRAMES)
#define ZYNQMP_USE_JUMBO
#endif
#endif
#define GEM_VERSION_ZYNQMP 7
#define GEM_VERSION_VERSAL 0x107
#define MAX_FRAME_SIZE_JUMBO (XEMACPS_MTU_JUMBO + XEMACPS_HDR_SIZE + XEMACPS_TRL_SIZE)
void xemacpsif_setmac(u32_t index, u8_t *addr);
u8_t* xemacpsif_getmac(u32_t index);
err_t xemacpsif_init(struct netif *netif);
s32_t xemacpsif_input(struct netif *netif);
/* xaxiemacif_hw.c */
void xemacps_error_handler(XEmacPs * Temac);
/* structure within each netif, encapsulating all information required for
* using a particular temac instance
*/
typedef struct {
XEmacPs emacps;
/* queue to store overflow packets */
pq_queue_t *recv_q;
pq_queue_t *send_q;
/* pointers to memory holding buffer descriptors (used only with SDMA) */
void *rx_bdspace;
void *tx_bdspace;
unsigned int last_rx_frms_cntr;
enum ethernet_link_status eth_link_status;
} xemacpsif_s;
extern xemacpsif_s xemacpsif;
s32_t xemacps_is_tx_space_available(xemacpsif_s *emac);
/* xemacpsif_dma.c */
void xemacps_process_sent_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *txring);
u32_t phy_setup_emacps (XEmacPs *xemacpsp, u32_t phy_addr);
#ifdef SGMII_FIXED_LINK
u32_t pcs_setup_emacps (XEmacPs *xemacps);
#endif
void detect_phy(XEmacPs *xemacpsp);
void emacps_send_handler(void *arg);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p,
u32_t block_till_tx_complete, u32_t *to_block_index);
#else
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p);
#endif
void emacps_recv_handler(void *arg);
void emacps_error_handler(void *arg,u8 Direction, u32 ErrorWord);
void setup_rx_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *rxring);
void HandleTxErrors(struct xemac_s *xemac);
void HandleEmacPsError(struct xemac_s *xemac);
XEmacPs_Config *xemacps_lookup_config(unsigned mac_base);
void init_emacps(xemacpsif_s *xemacps, struct netif *netif);
void setup_isr (struct xemac_s *xemac);
XStatus init_dma(struct xemac_s *xemac);
void start_emacps (xemacpsif_s *xemacps);
void free_txrx_pbufs(xemacpsif_s *xemacpsif);
void free_onlytx_pbufs(xemacpsif_s *xemacpsif);
void init_emacps_on_error (xemacpsif_s *xemacps, struct netif *netif);
void clean_dma_txdescs(struct xemac_s *xemac);
void resetrx_on_no_rxdata(xemacpsif_s *xemacpsif);
void reset_dma(struct xemac_s *xemac);
#ifdef __cplusplus
}
#endif
#endif /* __NETIF_XAXIEMACIF_H__ */

View File

@@ -0,0 +1,95 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __NETIF_XLLTEMACIF_H__
#define __NETIF_XLLTEMACIF_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "lwip/netif.h"
#include "netif/etharp.h"
#include "netif/xadapter.h"
#include "xparameters.h"
#include "xstatus.h"
#include "xlltemac.h"
#include "xlldma.h"
#include "xllfifo.h"
#include "xlldma_bdring.h"
#include "netif/xpqueue.h"
#include "xlwipconfig.h"
void xlltemacif_setmac(u32_t index, u8_t *addr);
u8_t* xlltemacif_getmac(u32_t index);
err_t xlltemacif_init(struct netif *netif);
int xlltemacif_input(struct netif *netif);
unsigned get_IEEE_phy_speed(XLlTemac *xlltemacp);
unsigned Phy_Setup (XLlTemac *xlltemacp);
unsigned configure_IEEE_phy_speed(XLlTemac *xlltemacp, unsigned speed);
/* xlltemacif_hw.c */
void xlltemac_error_handler(XLlTemac * Temac);
/* structure within each netif, encapsulating all information required for
* using a particular temac instance
*/
typedef struct {
XLlDma lldma;
XLlFifo llfifo;
XLlTemac lltemac;
/* queue to store overflow packets */
pq_queue_t *recv_q;
pq_queue_t *send_q;
/* pointers to memory holding buffer descriptors (used only with SDMA) */
void *rx_bdspace;
void *tx_bdspace;
enum ethernet_link_status eth_link_status;
} xlltemacif_s;
extern xlltemacif_s xlltemacif;
/* xlltemacif_sdma.c */
XStatus init_sdma(struct xemac_s *xemac);
int process_sent_bds(XLlDma_BdRing *txring);
void lldma_send_handler(void *arg);
XStatus lldma_sgsend(xlltemacif_s *xlltemacif, struct pbuf *p);
#ifdef __cplusplus
}
#endif
#endif /* __NETIF_XLLTEMACIF_H__ */

View File

@@ -0,0 +1,57 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __LWIP_PBUF_QUEUE_H_
#define __LWIP_PBUF_QUEUE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "lwip/debug.h"
#define PQ_QUEUE_SIZE 4096
typedef struct {
void *data[PQ_QUEUE_SIZE];
int head, tail, len;
} pq_queue_t;
pq_queue_t* pq_create_queue();
int pq_enqueue(pq_queue_t *q, void *p);
void* pq_dequeue(pq_queue_t *q);
int pq_qlength(pq_queue_t *q);
#ifdef __cplusplus
}
#endif
#endif
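/* Usage sketch (illustrative, not part of this header): the port buffers
 * pbufs in these queues between the receive interrupt (producer) and the
 * input path (consumer); pq_enqueue() returns a negative value when the
 * queue is full. Variable names below are hypothetical.
 */
#if 0
pq_queue_t *recv_q = pq_create_queue();

/* producer, e.g. the RX interrupt handler: drop the pbuf on overflow */
if (pq_enqueue(recv_q, (void *)p) < 0)
	pbuf_free(p);

/* consumer, e.g. xemacif_input(): drain one packet at a time */
while (pq_qlength(recv_q) > 0) {
	struct pbuf *q = (struct pbuf *)pq_dequeue(recv_q);
	/* hand q to lwIP via netif->input() */
}
#endif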

View File

@@ -0,0 +1,63 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XTOPOLOGY_H_
#define __XTOPOLOGY_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "xil_types.h"
enum xemac_types { xemac_type_unknown = -1, xemac_type_xps_emaclite, xemac_type_xps_ll_temac, xemac_type_axi_ethernet, xemac_type_emacps };
struct xtopology_t {
UINTPTR emac_baseaddr;
enum xemac_types emac_type;
#ifndef SDT
UINTPTR intc_baseaddr;
unsigned intc_emac_intr; /* valid only for xemac_type_xps_emaclite */
UINTPTR scugic_baseaddr; /* valid only for Zynq */
unsigned scugic_emac_intr; /* valid only for GEM */
#endif
};
extern int xtopology_n_emacs;
extern struct xtopology_t xtopology[];
int xtopology_find_index(unsigned base);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2001-2003 Swedish Institute of Computer Science.
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XLWIPCONFIG_H_
#define __XLWIPCONFIG_H_
#define XLWIP_CONFIG_INCLUDE_GEM
#define XLWIP_CONFIG_N_TX_DESC 64
#define XLWIP_CONFIG_N_RX_DESC 64
//#define CONFIG_LINKSPEED100
#define CONFIG_LINKSPEED_AUTODETECT
// TODO: generate the defines below from CMake via the #cmakedefine template that follows
/*
#cmakedefine XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ @XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ@
#cmakedefine XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ @XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ@
#cmakedefine XLWIP_CONFIG_INCLUDE_EMACLITE @XLWIP_CONFIG_INCLUDE_EMACLITE@
#cmakedefine XLWIP_CONFIG_INCLUDE_AXI_ETHERNET @XLWIP_CONFIG_INCLUDE_AXI_ETHERNET@
#cmakedefine XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA @XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA@
#cmakedefine XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO @XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO@
#cmakedefine XLWIP_CONFIG_AXI_ETHERNET_ENABLE_1588 @XLWIP_CONFIG_AXI_ETHERNET_ENABLE_1588@
#cmakedefine XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA @XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA@
#cmakedefine XLWIP_CONFIG_INCLUDE_GEM @XLWIP_CONFIG_INCLUDE_GEM@
#cmakedefine XLWIP_CONFIG_N_TX_DESC @XLWIP_CONFIG_N_TX_DESC@
#cmakedefine XLWIP_CONFIG_N_RX_DESC @XLWIP_CONFIG_N_RX_DESC@
#cmakedefine XLWIP_CONFIG_N_TX_COALESCE @XLWIP_CONFIG_N_TX_COALESCE@
#cmakedefine XLWIP_CONFIG_N_RX_COALESCE @XLWIP_CONFIG_N_RX_COALESCE@
#cmakedefine XLWIP_CONFIG_EMAC_NUMBER @XLWIP_CONFIG_EMAC_NUMBER@
#cmakedefine XLWIP_CONFIG_PCS_PMA_1000BASEX_CORE_PRESENT @XLWIP_CONFIG_PCS_PMA_1000BASEX_CORE_PRESENT@
#cmakedefine XLWIP_CONFIG_PCS_PMA_SGMII_CORE_PRESENT @XLWIP_CONFIG_PCS_PMA_SGMII_CORE_PRESENT@
*/
#endif

View File

@@ -0,0 +1,12 @@
target_sources(lwip PRIVATE
xadapter.c
xpqueue.c
xemacpsif_dma.c
xemacpsif_physpeed.c
xemacpsif_hw.c
xemacpsif.c
topology.c
)
target_include_directories(lwip PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})

View File

@@ -0,0 +1,37 @@
#include "netif/xtopology.h"
// struct xtopology_t {
// UINTPTR emac_baseaddr;
// enum xemac_types emac_type;
// #ifndef SDT
// UINTPTR intc_baseaddr;
// unsigned intc_emac_intr; /* valid only for xemac_type_xps_emaclite */
// UINTPTR scugic_baseaddr; /* valid only for Zynq */
// unsigned scugic_emac_intr; /* valid only for GEM */
// #endif
// };
int xtopology_n_emacs = 2;
struct xtopology_t xtopology[] = {
{
0xE000B000,	/* GEM0 base address on Zynq-7000 */
xemac_type_emacps,
#ifndef SDT
0,		/* no AXI INTC on this system */
0,
0xF8F00100,	/* SCUGIC CPU interface base address */
0x36		/* GEM0 interrupt ID (54) */
#endif
},
{
0xE000C000,	/* GEM1 base address */
xemac_type_emacps,
#ifndef SDT
0,
0,
0xF8F00100,
0x4D		/* GEM1 interrupt ID (77) */
#endif
}
};

View File

@@ -0,0 +1,502 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#include "xlwipconfig.h"
#include "xemac_ieee_reg.h"
#if !NO_SYS
#endif
#include "lwip/mem.h"
#include "lwip/stats.h"
#include "lwip/sys.h"
#include "lwip/ip.h"
#include "lwip/tcp.h"
#include "lwip/udp.h"
#include "lwip/priv/tcp_priv.h"
#include "netif/etharp.h"
#include "netif/xadapter.h"
#ifdef XLWIP_CONFIG_INCLUDE_EMACLITE
#include "netif/xemacliteif.h"
#endif
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
#include "netif/xaxiemacif.h"
#endif
#ifdef XLWIP_CONFIG_INCLUDE_GEM
#include "netif/xemacpsif.h"
#endif
#if !NO_SYS
#include "lwip/tcpip.h"
#define THREAD_STACKSIZE 256
#define LINK_DETECT_THREAD_INTERVAL 1000 /* one second */
void link_detect_thread(void *p);
#endif
/* global lwip debug variable used for debugging */
int lwip_runtime_debug = 0;
u32_t phyaddrforemac;
void
lwip_raw_init()
{
ip_init(); /* Doesn't do much, it should be called to handle future changes. */
#if LWIP_UDP
udp_init(); /* Clears the UDP PCB list. */
#endif
#if LWIP_TCP
tcp_init(); /* Clears the TCP PCB list and clears some internal TCP timers. */
/* Note: you must call tcp_fasttmr() and tcp_slowtmr() at the */
/* predefined regular intervals after this initialization. */
#endif
}
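/* Usage sketch (illustrative, not part of this commit): in NO_SYS (raw API)
 * builds the application drives the stack itself after lwip_raw_init(), as
 * the note above says -- poll the MAC and fire the TCP timers at their
 * regular intervals. The intervals below are lwIP's defaults; the helper
 * name is hypothetical.
 */
#if 0
void example_raw_poll_loop(struct netif *netif)
{
	lwip_raw_init();
	for (;;) {
		/* drain any received frames into lwIP */
		xemacif_input(netif);
		/* from a periodic timer: call tcp_fasttmr() every ~250 ms
		 * and tcp_slowtmr() every ~500 ms */
	}
}
#endif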
static enum xemac_types
find_mac_type(unsigned base)
{
int i;
#ifdef SDT
for (i = 0; xtopology[i].emac_baseaddr != NULL; i++) {
#else
for (i = 0; i < xtopology_n_emacs; i++) {
#endif
if (xtopology[i].emac_baseaddr == base)
return xtopology[i].emac_type;
}
return xemac_type_unknown;
}
int
xtopology_find_index(unsigned base)
{
int i;
#ifdef SDT
for (i = 0; xtopology[i].emac_baseaddr != NULL; i++) {
#else
for (i = 0; i < xtopology_n_emacs; i++) {
#endif
if (xtopology[i].emac_baseaddr == base)
return i;
}
return -1;
}
/*
* xemac_add: this is a wrapper around lwIP's netif_add function.
* The objective is to provide portability between the different Xilinx MAC's
* This function can be used to add both xps_ethernetlite and xps_ll_temac
* based interfaces
*/
struct netif *
xemac_add(struct netif *netif,
ip_addr_t *ipaddr, ip_addr_t *netmask, ip_addr_t *gw,
unsigned char *mac_ethernet_address,
UINTPTR mac_baseaddr)
{
int i;
struct netif * nif = NULL;
/* set mac address */
netif->hwaddr_len = 6;
for (i = 0; i < 6; i++)
netif->hwaddr[i] = mac_ethernet_address[i];
/* initialize based on MAC type */
switch (find_mac_type(mac_baseaddr)) {
case xemac_type_xps_emaclite:
#ifdef XLWIP_CONFIG_INCLUDE_EMACLITE
nif = netif_add(netif, ipaddr, netmask, gw,
(void*)(UINTPTR)mac_baseaddr,
xemacliteif_init,
#if NO_SYS
ethernet_input
#else
tcpip_input
#endif
);
#else
nif = NULL;
#endif
break;
case xemac_type_axi_ethernet:
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
nif = netif_add(netif, ipaddr, netmask, gw,
(void*)(UINTPTR)mac_baseaddr,
xaxiemacif_init,
#if NO_SYS
ethernet_input
#else
tcpip_input
#endif
);
#else
nif = NULL;
#endif
break;
#if defined (__arm__) || defined (__aarch64__)
case xemac_type_emacps:
#ifdef XLWIP_CONFIG_INCLUDE_GEM
nif = netif_add(netif, ipaddr, netmask, gw,
(void*)(UINTPTR)mac_baseaddr,
xemacpsif_init,
#if NO_SYS
ethernet_input
#else
tcpip_input
#endif
);
#endif
break;
#endif
default:
xil_printf("unable to determine type of EMAC with baseaddress 0x%08lx\r\n",
(UINTPTR)mac_baseaddr);
}
#ifdef OS_IS_FREERTOS
/* Start thread to detect link periodically for Hot Plug autodetect */
sys_thread_new("link_detect_thread", link_detect_thread, netif,
THREAD_STACKSIZE, tskIDLE_PRIORITY);
#endif
return nif;
}
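/* Usage sketch (illustrative, not part of this commit): a typical xemac_add()
 * call for GEM0 on Zynq-7000. The IP addresses and MAC are placeholder
 * values; the base address matches topology.c in this commit.
 */
#if 0
static struct netif example_netif;	/* hypothetical application netif */

void example_add_gem0(void)
{
	ip_addr_t ip, mask, gw;
	unsigned char mac[6] = { 0x00, 0x0a, 0x35, 0x00, 0x01, 0x02 };

	IP4_ADDR(&ip,   192, 168,   0, 10);
	IP4_ADDR(&mask, 255, 255, 255,  0);
	IP4_ADDR(&gw,   192, 168,   0,  1);

	xemac_add(&example_netif, &ip, &mask, &gw, mac, ZYNQ_EMACPS_0_BASEADDR);
	netif_set_default(&example_netif);
	netif_set_up(&example_netif);
}
#endif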
#if !NO_SYS
/*
* The input thread calls lwIP to process any received packets.
* This thread waits until a packet is received (sem_rx_data_available),
* and then calls xemacif_input which processes 1 packet at a time.
*/
void
xemacif_input_thread(struct netif *netif)
{
struct xemac_s *emac = (struct xemac_s *)netif->state;
while (1) {
/* sleep until there are packets to process
* This semaphore is set by the packet receive interrupt
* routine.
*/
sys_sem_wait(&emac->sem_rx_data_available);
/* move all received packets to lwIP */
xemacif_input(netif);
}
}
#endif
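/* Usage sketch (illustrative): under an RTOS the application spawns one
 * input thread per interface once the netif is up; the stack size and
 * priority below are placeholders.
 */
#if 0
sys_thread_new("xemacif_input", (void (*)(void *))xemacif_input_thread,
	netif, THREAD_STACKSIZE, tskIDLE_PRIORITY + 1);
#endif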
int
xemacif_input(struct netif *netif)
{
struct xemac_s *emac = (struct xemac_s *)netif->state;
int n_packets = 0;
switch (emac->type) {
case xemac_type_xps_emaclite:
#ifdef XLWIP_CONFIG_INCLUDE_EMACLITE
n_packets = xemacliteif_input(netif);
break;
#else
xil_printf("incorrect configuration: xps_ethernetlite drivers not present?");
while(1);
return 0;
#endif
case xemac_type_axi_ethernet:
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
n_packets = xaxiemacif_input(netif);
break;
#else
xil_printf("incorrect configuration: axi_ethernet drivers not present?");
while(1);
return 0;
#endif
#if defined (__arm__) || defined (__aarch64__)
case xemac_type_emacps:
#ifdef XLWIP_CONFIG_INCLUDE_GEM
n_packets = xemacpsif_input(netif);
break;
#else
xil_printf("incorrect configuration: ps7_ethernet drivers not present?\r\n");
while(1);
return 0;
#endif
#endif
default:
xil_printf("incorrect configuration: unknown temac type");
while(1);
return 0;
}
return n_packets;
}
#ifdef SGMII_FIXED_LINK
static u32_t pcs_link_detect(XEmacPs *xemacp)
{
u16_t status;
/* read the latched PCS status register twice so that the second read
 * reflects the current link state rather than a stale latched value */
status = XEmacPs_ReadReg(xemacp->Config.BaseAddress, XEMACPS_PCS_STATUS_OFFSET);
status = XEmacPs_ReadReg(xemacp->Config.BaseAddress, XEMACPS_PCS_STATUS_OFFSET);
status &= XEMACPS_PCS_STATUS_LINK_STATUS_MASK;
if (status)
return 1;
return 0;
}
#endif
#if defined(XLWIP_CONFIG_INCLUDE_GEM)
void emacps_link_status(struct netif *netif, xemacpsif_s *xemacs, XEmacPs *xemacp)
{
u32_t link_speed, phy_link_status, phy_autoneg_status;
u16_t status;
if ((xemacp->IsReady != (u32)XIL_COMPONENT_IS_READY) ||
(xemacs->eth_link_status == ETH_LINK_UNDEFINED))
return;
#ifndef SGMII_FIXED_LINK
/* Read Phy Status register twice to get the confirmation of the current
* link status.
*/
XEmacPs_PhyRead(xemacp, phyaddrforemac, IEEE_STATUS_REG_OFFSET, &status);
XEmacPs_PhyRead(xemacp, phyaddrforemac, IEEE_STATUS_REG_OFFSET, &status);
if (status & IEEE_STAT_LINK_STATUS)
phy_link_status = 1;
else
phy_link_status = 0;
if (status & IEEE_STAT_AUTONEGOTIATE_COMPLETE)
phy_autoneg_status = 1;
else
phy_autoneg_status = 0;
#else
phy_link_status = pcs_link_detect(xemacp);
#endif
if ((xemacs->eth_link_status == ETH_LINK_UP) && (!phy_link_status))
xemacs->eth_link_status = ETH_LINK_DOWN;
switch (xemacs->eth_link_status) {
case ETH_LINK_UNDEFINED:
case ETH_LINK_UP:
return;
case ETH_LINK_DOWN:
netif_set_link_down(netif);
xemacs->eth_link_status = ETH_LINK_NEGOTIATING;
xil_printf("Ethernet Link down\r\n");
break;
case ETH_LINK_NEGOTIATING:
if (phy_link_status && phy_autoneg_status) {
link_speed = phy_setup_emacps(xemacp,
phyaddrforemac);
XEmacPs_SetOperatingSpeed(xemacp, link_speed);
netif_set_link_up(netif);
xemacs->eth_link_status = ETH_LINK_UP;
xil_printf("Ethernet Link up\r\n");
}
break;
}
return;
}
#endif
#if defined(XLWIP_CONFIG_INCLUDE_AXI_ETHERNET)
void axieth_link_status(struct netif *netif, xaxiemacif_s *xemacs, XAxiEthernet *xemacp)
{
u32_t link_speed, phy_link_status, phy_autoneg_status;
u16_t status;
if ((xemacp->IsReady != (u32)XIL_COMPONENT_IS_READY) ||
(xemacs->eth_link_status == ETH_LINK_UNDEFINED))
return;
#ifndef SGMII_FIXED_LINK
/* Read Phy Status register twice to get the confirmation of the current
* link status.
*/
XAxiEthernet_PhyRead(xemacp, phyaddrforemac, IEEE_STATUS_REG_OFFSET, &status);
XAxiEthernet_PhyRead(xemacp, phyaddrforemac, IEEE_STATUS_REG_OFFSET, &status);
if (status & IEEE_STAT_LINK_STATUS)
phy_link_status = 1;
else
phy_link_status = 0;
if (status & IEEE_STAT_AUTONEGOTIATE_COMPLETE)
phy_autoneg_status = 1;
else
phy_autoneg_status = 0;
#else
phy_link_status = pcs_link_detect(xemacp);
#endif
if ((xemacs->eth_link_status == ETH_LINK_UP) && (!phy_link_status))
xemacs->eth_link_status = ETH_LINK_DOWN;
switch (xemacs->eth_link_status) {
case ETH_LINK_UNDEFINED:
case ETH_LINK_UP:
return;
case ETH_LINK_DOWN:
netif_set_link_down(netif);
xemacs->eth_link_status = ETH_LINK_NEGOTIATING;
xil_printf("Ethernet Link down\r\n");
break;
case ETH_LINK_NEGOTIATING:
if (phy_link_status && phy_autoneg_status) {
link_speed = phy_setup_axiemac(xemacp);
XAxiEthernet_SetOperatingSpeed(xemacp,link_speed);
netif_set_link_up(netif);
xemacs->eth_link_status = ETH_LINK_UP;
xil_printf("Ethernet Link up\r\n");
}
break;
}
return;
}
#endif
#if defined(XLWIP_CONFIG_INCLUDE_EMACLITE)
void emaclite_link_status(struct netif *netif, xemacliteif_s *xemacs, XEmacLite *xemacp)
{
u32_t phy_link_status, status, phy_autoneg_status;
if ((xemacp->IsReady != (u32)XIL_COMPONENT_IS_READY) ||
(xemacs->eth_link_status == ETH_LINK_UNDEFINED))
return;
#ifndef SGMII_FIXED_LINK
/* Read Phy Status register twice to get the confirmation of the current
* link status.
*/
XEmacLite_PhyRead(xemacp, phyaddrforemac, IEEE_STATUS_REG_OFFSET, &status);
XEmacLite_PhyRead(xemacp, phyaddrforemac, IEEE_STATUS_REG_OFFSET, &status);
if (status & IEEE_STAT_LINK_STATUS)
phy_link_status = 1;
else
phy_link_status = 0;
if (status & IEEE_STAT_AUTONEGOTIATE_COMPLETE)
phy_autoneg_status = 1;
else
phy_autoneg_status = 0;
#else
phy_link_status = pcs_link_detect(xemacp);
#endif
if ((xemacs->eth_link_status == ETH_LINK_UP) && (!phy_link_status))
xemacs->eth_link_status = ETH_LINK_DOWN;
switch (xemacs->eth_link_status) {
case ETH_LINK_UNDEFINED:
case ETH_LINK_UP:
return;
case ETH_LINK_DOWN:
netif_set_link_down(netif);
xemacs->eth_link_status = ETH_LINK_NEGOTIATING;
xil_printf("Ethernet Link down\r\n");
break;
case ETH_LINK_NEGOTIATING:
if (phy_link_status && phy_autoneg_status) {
netif_set_link_up(netif);
xemacs->eth_link_status = ETH_LINK_UP;
xil_printf("Ethernet Link up\r\n");
}
break;
}
return;
}
#endif
void eth_link_detect(struct netif *netif)
{
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
#if defined(XLWIP_CONFIG_INCLUDE_GEM)
xemacpsif_s *xemacps = (xemacpsif_s *)(xemac->state);
XEmacPs *xemacpsp = &xemacps->emacps;
#endif
#if defined(XLWIP_CONFIG_INCLUDE_AXI_ETHERNET)
xaxiemacif_s *xaxiemac = (xaxiemacif_s *)(xemac->state);
XAxiEthernet *xaxiemacp = &xaxiemac->axi_ethernet;
#endif
#if defined(XLWIP_CONFIG_INCLUDE_EMACLITE)
xemacliteif_s *xemaclite = (xemacliteif_s *)(xemac->state);
XEmacLite *xemaclitep = xemaclite->instance;
#endif
switch (xemac->type) {
case xemac_type_emacps:
#if defined(XLWIP_CONFIG_INCLUDE_GEM)
emacps_link_status(netif, xemacps, xemacpsp);
#endif
break;
case xemac_type_xps_emaclite:
#if defined(XLWIP_CONFIG_INCLUDE_EMACLITE)
emaclite_link_status(netif, xemaclite, xemaclitep);
#endif
break;
case xemac_type_axi_ethernet:
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
axieth_link_status(netif, xaxiemac, xaxiemacp);
#endif
break;
}
}
#if !NO_SYS
void link_detect_thread(void *p)
{
struct netif *netif = (struct netif *) p;
while (1) {
/* Call eth_link_detect() every second to detect Ethernet link
* change.
*/
eth_link_detect(netif);
vTaskDelay(LINK_DETECT_THREAD_INTERVAL / portTICK_RATE_MS);
}
}
#endif

View File

@@ -0,0 +1,752 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include <stdio.h>
#include <string.h>
#include <xparameters.h>
#include "xlwipconfig.h"
#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/igmp.h"
#include "netif/etharp.h"
#include "netif/xaxiemacif.h"
#include "netif/xadapter.h"
#include "netif/xpqueue.h"
#include "xaxiemacif_fifo.h"
#include "xaxiemacif_hw.h"
#include "xparameters.h"
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#else
#include "xintc.h"
#endif
#else
#include "xinterrupt_wrap.h"
#endif
#if LWIP_IPV6
#include "lwip/ethip6.h"
#endif
/* Define those to better describe your network interface. */
#define IFNAME0 't'
#define IFNAME1 'e'
#if LWIP_IGMP
static err_t xaxiemacif_mac_filter_update (struct netif *netif,
ip_addr_t *group, u8_t action);
static u8_t xaxiemac_mcast_entry_mask = 0;
#endif
#if LWIP_IPV6 && LWIP_IPV6_MLD
#define XAXIEMAC_MAX_MAC_ADDR 4
static err_t xaxiemacif_mld6_mac_filter_update (struct netif *netif,
ip_addr_t *group, u8_t action);
static u8_t xaxiemac_mld6_mcast_entry_mask;
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
extern volatile u32_t notifyinfo[XLWIP_CONFIG_N_TX_DESC];
#endif
/*
* this function is always called with interrupts off
* this function also assumes that there are available BD's
*/
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
static err_t _unbuffered_low_level_output(xaxiemacif_s *xaxiemacif,
struct pbuf *p, u32_t block_till_tx_complete, u32_t *to_block_index )
#else
static err_t _unbuffered_low_level_output(xaxiemacif_s *xaxiemacif,
struct pbuf *p)
#endif
{
XStatus status = 0;
err_t err = ERR_MEM;
#if ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
if (XAxiEthernet_IsDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (block_till_tx_complete == 1) {
status = axidma_sgsend(xaxiemacif, p, 1, to_block_index);
} else {
status = axidma_sgsend(xaxiemacif, p, 0, to_block_index);
}
#else
status = axidma_sgsend(xaxiemacif, p);
#endif
#endif
} else if (XAxiEthernet_IsMcDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA
xil_printf("lwip support with mcdma is deprecated\n");
status = axi_mcdma_sgsend(xaxiemacif, p);
#endif
} else {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
status = axififo_send(xaxiemacif, p);
#endif
}
if (status != XST_SUCCESS) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
} else {
err = ERR_OK;
}
#if ETH_PAD_SIZE
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
#if LINK_STATS
lwip_stats.link.xmit++;
#endif /* LINK_STATS */
return err;
}
/*
* low_level_output():
*
* Should do the actual transmission of the packet. The packet is
* contained in the pbuf that is passed to the function. This pbuf
* might be chained.
*
*/
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
err_t err = ERR_MEM;
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
u32_t notfifyblocksleepcntr;
u32_t to_block_index;
#endif
SYS_ARCH_DECL_PROTECT(lev);
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
/*
* With AXI Ethernet on Zynq, we observed unexplained delays for
* BD Status update. As a result, we are hitting a condition where
* there are no BDs free to transmit packets. So, we have added
* this logic where we look for the status update in a definite
* loop.
*/
XAxiDma_BdRing *txring = XAxiDma_GetTxRing(&xaxiemacif->axidma);
#endif
int count = 100;
SYS_ARCH_PROTECT(lev);
while (count) {
/* check if space is available to send */
if (xaxiemac_is_tx_space_available(xaxiemacif)) {
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
err = _unbuffered_low_level_output(xaxiemacif, p, 1, &to_block_index);
break;
} else {
err = _unbuffered_low_level_output(xaxiemacif, p, 0, &to_block_index);
break;
}
#else
err = _unbuffered_low_level_output(xaxiemacif, p);
break;
#endif
} else {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
process_sent_bds(txring);
#endif
count--;
}
}
if (count == 0) {
xil_printf("pack dropped, no space\r\n");
SYS_ARCH_UNPROTECT(lev);
goto return_pack_dropped;
}
SYS_ARCH_UNPROTECT(lev);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
/* Wait for approx 1 second before timing out */
notfifyblocksleepcntr = 900000;
while(notifyinfo[to_block_index] == 1) {
usleep(1);
notfifyblocksleepcntr--;
if (notfifyblocksleepcntr <= 0) {
err = ERR_TIMEOUT;
break;
}
}
}
netif_clear_opt_block_tx(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET);
#endif
return_pack_dropped:
return err;
}
/*
* low_level_input():
*
* Should allocate a pbuf and transfer the bytes of the incoming
* packet from the interface into the pbuf.
*
*/
static struct pbuf *low_level_input(struct netif *netif)
{
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
struct pbuf *p;
/* see if there is data to process */
if (pq_qlength(xaxiemacif->recv_q) == 0)
return NULL;
/* return one packet from receive q */
p = (struct pbuf *)pq_dequeue(xaxiemacif->recv_q);
return p;
}
/*
* xaxiemacif_output():
*
* This function is called by the TCP/IP stack when an IP packet
* should be sent. It calls the function called low_level_output() to
* do the actual transmission of the packet.
*
*/
static err_t xaxiemacif_output(struct netif *netif, struct pbuf *p,
const ip_addr_t *ipaddr)
{
/* resolve hardware address, then send (or queue) packet */
return etharp_output(netif, p, ipaddr);
}
/*
* xaxiemacif_input():
*
* This function should be called when a packet is ready to be read
* from the interface. It uses the function low_level_input() that
* should handle the actual reception of bytes from the network
* interface.
*
* Returns the number of packets read (max 1 packet on success,
* 0 if there are no packets)
*
*/
int xaxiemacif_input(struct netif *netif)
{
struct eth_hdr *ethhdr;
struct pbuf *p;
SYS_ARCH_DECL_PROTECT(lev);
#if !NO_SYS
while (1)
#endif
{
/* move received packet into a new pbuf */
SYS_ARCH_PROTECT(lev);
p = low_level_input(netif);
SYS_ARCH_UNPROTECT(lev);
/* no packet could be read, silently ignore this */
if (p == NULL)
return 0;
/* points to packet payload, which starts with an Ethernet header */
ethhdr = p->payload;
#if LINK_STATS
lwip_stats.link.recv++;
#endif /* LINK_STATS */
switch (htons(ethhdr->type)) {
/* IP or ARP packet? */
case ETHTYPE_IP:
case ETHTYPE_ARP:
#if LWIP_IPV6
/*IPv6 Packet?*/
case ETHTYPE_IPV6:
#endif
#if PPPOE_SUPPORT
/* PPPoE packet? */
case ETHTYPE_PPPOEDISC:
case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
/* full packet send to tcpip_thread to process */
if (netif->input(p, netif) != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_input: IP input error\r\n"));
pbuf_free(p);
p = NULL;
}
break;
default:
pbuf_free(p);
p = NULL;
break;
}
}
return 1;
}
static err_t low_level_init(struct netif *netif)
{
unsigned mac_address = (unsigned)(UINTPTR)(netif->state);
struct xemac_s *xemac;
xaxiemacif_s *xaxiemacif;
XAxiEthernet_Config *mac_config;
xaxiemacif = mem_malloc(sizeof *xaxiemacif);
if (xaxiemacif == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac = mem_malloc(sizeof *xemac);
if (xemac == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac->state = (void *)xaxiemacif;
xemac->topology_index = xtopology_find_index(mac_address);
xemac->type = xemac_type_axi_ethernet;
xaxiemacif->send_q = NULL;
xaxiemacif->recv_q = pq_create_queue();
if (!xaxiemacif->recv_q)
return ERR_MEM;
/* maximum transfer unit */
#ifdef USE_JUMBO_FRAMES
netif->mtu = XAE_JUMBO_MTU - XAE_HDR_SIZE;
#else
netif->mtu = XAE_MTU - XAE_HDR_SIZE;
#endif
#if LWIP_IGMP
netif->igmp_mac_filter = xaxiemacif_mac_filter_update;
#endif
#if LWIP_IPV6 && LWIP_IPV6_MLD
netif->mld_mac_filter = xaxiemacif_mld6_mac_filter_update;
#endif
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP |
NETIF_FLAG_LINK_UP;
#if LWIP_IPV6 && LWIP_IPV6_MLD
netif->flags |= NETIF_FLAG_MLD6;
#endif
#if LWIP_IGMP
netif->flags |= NETIF_FLAG_IGMP;
#endif
#if !NO_SYS
sys_sem_new(&xemac->sem_rx_data_available, 0);
#endif
/* obtain config of this emac */
mac_config = xaxiemac_lookup_config((unsigned)(UINTPTR)netif->state);
XAxiEthernet_Initialize(&xaxiemacif->axi_ethernet, mac_config,
mac_config->BaseAddress);
#ifdef XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT
enable_sgmii_clock(&xaxiemacif->axi_ethernet);
#endif
/* figure out if the system has DMA */
if (XAxiEthernet_IsDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
/* initialize the DMA engine */
init_axi_dma(xemac);
#endif
} else if (XAxiEthernet_IsFifo(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
/* initialize the locallink FIFOs */
init_axi_fifo(xemac);
#endif
} else if (XAxiEthernet_IsMcDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA
/* Initialize MCDMA engine */
init_axi_mcdma(xemac);
#endif
} else {
/* should not occur */
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_init: mac is not configured with DMA, MCDMA or FIFO\r\n"));
return ERR_IF;
}
/* initialize the mac */
init_axiemac(xaxiemacif, netif);
/* replace the state in netif (currently the emac baseaddress)
* with the mac instance pointer.
*/
netif->state = (void *)xemac;
return ERR_OK;
}
#if LWIP_IPV6 && LWIP_IPV6_MLD
static u8_t xaxiemacif_ip6_addr_ismulticast(ip6_addr_t* ip_addr)
{
if(ip6_addr_ismulticast_linklocal(ip_addr)||
ip6_addr_ismulticast_iflocal(ip_addr) ||
ip6_addr_ismulticast_adminlocal(ip_addr)||
ip6_addr_ismulticast_sitelocal(ip_addr) ||
ip6_addr_ismulticast_orglocal(ip_addr) ||
ip6_addr_ismulticast_global(ip_addr)) {
/*Return TRUE if IPv6 is Multicast type*/
return TRUE;
} else {
return FALSE;
}
}
static void xaxiemacif_mld6_mac_hash_update (struct netif *netif, u8_t *ip_addr,
u8_t action,u8_t entry)
{
u8_t multicast_mac_addr[6];
u8_t multicast_mac_addr_to_clr[6];
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
if (action == NETIF_ADD_MAC_FILTER) {
/* Set Multicast MAC address in hash table */
multicast_mac_addr[0] = LL_IP6_MULTICAST_ADDR_0;
multicast_mac_addr[1] = LL_IP6_MULTICAST_ADDR_1;
multicast_mac_addr[2] = ip_addr[12];
multicast_mac_addr[3] = ip_addr[13];
multicast_mac_addr[4] = ip_addr[14];
multicast_mac_addr[5] = ip_addr[15];
XAxiEthernet_Stop(&xaxiemacif->axi_ethernet);
XAxiEthernet_MulticastAdd(&xaxiemacif->axi_ethernet,multicast_mac_addr, entry);
XAxiEthernet_Start(&xaxiemacif->axi_ethernet);
} else if (action == NETIF_DEL_MAC_FILTER) {
/* Remove Multicast MAC address from hash table */
XAxiEthernet_MulticastGet(&xaxiemacif->axi_ethernet,multicast_mac_addr_to_clr, entry);
XAxiEthernet_Stop(&xaxiemacif->axi_ethernet);
XAxiEthernet_MulticastClear(&xaxiemacif->axi_ethernet, entry);
XAxiEthernet_Start(&xaxiemacif->axi_ethernet);
}
}
static err_t
xaxiemacif_mld6_mac_filter_update (struct netif *netif, ip_addr_t *group,
u8_t action)
{
u8_t temp_mask;
unsigned int i;
u8_t entry;
u8_t * ip_addr = (u8_t *) group;
if(!(xaxiemacif_ip6_addr_ismulticast((ip6_addr_t*) ip_addr))) {
LWIP_DEBUGF(NETIF_DEBUG,
("%s: The requested MAC address is not a multicast address.\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast address add operation failure !!\r\n"));
return ERR_ARG;
}
if (action == NETIF_ADD_MAC_FILTER) {
for (i = 0; i < XAXIEMAC_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xaxiemac_mld6_mcast_entry_mask & temp_mask) == temp_mask) {
continue;
}
entry = i;
xaxiemac_mld6_mcast_entry_mask |= temp_mask;
/* Update mac address in hash table */
xaxiemacif_mld6_mac_hash_update(netif, ip_addr, action,entry);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully added.\r\n", __func__));
return ERR_OK;
}
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers left.\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address add operation failure !!\r\n"));
return ERR_MEM;
} else if (action == NETIF_DEL_MAC_FILTER) {
for (i = 0; i < XAXIEMAC_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xaxiemac_mld6_mcast_entry_mask & temp_mask) == temp_mask) {
entry = i;
xaxiemacif_mld6_mac_hash_update(netif, ip_addr,action, entry);
xaxiemac_mld6_mcast_entry_mask &= (~temp_mask);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully removed.\r\n", __func__));
return ERR_OK;
} else {
continue;
}
}
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers present with\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("the requested Multicast MAC address.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address removal failure!!.\r\n"));
return ERR_MEM;
}
return ERR_ARG;
}
#endif
#if LWIP_IGMP
static err_t
xaxiemacif_mac_filter_update (struct netif *netif, ip_addr_t *group,
u8_t action)
{
err_t return_val = ERR_OK;
u8_t multicast_mac_addr[6];
u8_t multicast_mac_addr_to_clr[6];
u8_t temp_mask;
int entry;
int i;
u8_t * ip_addr_temp = (u8_t *)group;
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
if (action == IGMP_ADD_MAC_FILTER) {
if ((ip_addr_temp[0] >= 224) && (ip_addr_temp[0] <= 239)) {
if (xaxiemac_mcast_entry_mask >= 0x0F) {
LWIP_DEBUGF(NETIF_DEBUG,
("xaxiemacif_mac_filter_update: No multicast address registers left.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
(" Multicast MAC address add operation failure !!\r\n"));
return_val = ERR_MEM;
} else {
for (i = 0; i < 4; i++) {
temp_mask = (0x01) << i;
if ((xaxiemac_mcast_entry_mask &
temp_mask) == temp_mask) {
continue;
} else {
entry = i;
xaxiemac_mcast_entry_mask
|= temp_mask;
multicast_mac_addr[0] = 0x01;
multicast_mac_addr[1] = 0x00;
multicast_mac_addr[2] = 0x5E;
multicast_mac_addr[3] =
ip_addr_temp[1] & 0x7F;
multicast_mac_addr[4] =
ip_addr_temp[2];
multicast_mac_addr[5] =
ip_addr_temp[3];
XAxiEthernet_Stop
(&xaxiemacif->axi_ethernet);
XAxiEthernet_MulticastAdd
(&xaxiemacif->axi_ethernet,
multicast_mac_addr,entry);
XAxiEthernet_Start
(&xaxiemacif->axi_ethernet);
LWIP_DEBUGF(NETIF_DEBUG,
("xaxiemacif_mac_filter_update: Multicast MAC address successfully added.\r\n"));
return_val = ERR_OK;
break;
}
}
if (i == 4) {
LWIP_DEBUGF(NETIF_DEBUG,
("xaxiemacif_mac_filter_update: No multicast address registers left.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
(" Multicast MAC address add operation failure !!\r\n"));
return_val = ERR_MEM;
}
}
} else {
LWIP_DEBUGF(NETIF_DEBUG,
("xaxiemacif_mac_filter_update: The requested MAC address is not a multicast address.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
(" Multicast address add operation failure !!\r\n"));
return_val = ERR_ARG;
}
} else if (action == IGMP_DEL_MAC_FILTER) {
if ((ip_addr_temp[0] < 224) || (ip_addr_temp[0] > 239)) {
LWIP_DEBUGF(NETIF_DEBUG,
("xaxiemacif_mac_filter_update: The requested MAC address is not a multicast address.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
(" Multicast address add operation failure !!\r\n"));
return_val = ERR_ARG;
} else {
for (i = 0; i < 4; i++) {
	temp_mask = (0x01) << i;
	if ((xaxiemac_mcast_entry_mask & temp_mask) != temp_mask) {
		continue;
	}
	XAxiEthernet_MulticastGet(&xaxiemacif->axi_ethernet,
		multicast_mac_addr_to_clr, i);
	if ((ip_addr_temp[3] == multicast_mac_addr_to_clr[5]) &&
	    (ip_addr_temp[2] == multicast_mac_addr_to_clr[4]) &&
	    ((ip_addr_temp[1] & 0x7f) == multicast_mac_addr_to_clr[3])) {
		XAxiEthernet_Stop(&xaxiemacif->axi_ethernet);
		XAxiEthernet_MulticastClear(&xaxiemacif->axi_ethernet, i);
		XAxiEthernet_Start(&xaxiemacif->axi_ethernet);
		LWIP_DEBUGF(NETIF_DEBUG,
			("xaxiemacif_mac_filter_update: Multicast MAC address successfully removed.\r\n"));
		return_val = ERR_OK;
		xaxiemac_mcast_entry_mask &= (~temp_mask);
		break;
	}
}
if (i == 4) {
	LWIP_DEBUGF(NETIF_DEBUG,
		("xaxiemacif_mac_filter_update: no multicast address register holds\r\n"));
	LWIP_DEBUGF(NETIF_DEBUG,
		(" the requested multicast MAC address.\r\n"));
	LWIP_DEBUGF(NETIF_DEBUG,
		(" Multicast MAC address removal failure!\r\n"));
	return_val = ERR_MEM;
}
}
}
return return_val;
}
#endif
/*
* xaxiemacif_init():
*
* Should be called at the beginning of the program to set up the
* network interface. It calls the function low_level_init() to do the
* actual setup of the hardware.
*
*/
err_t
xaxiemacif_init(struct netif *netif)
{
#if LWIP_SNMP
/* ifType ethernetCsmacd(6) @see RFC1213 */
netif->link_type = 6;
/* your link speed here */
netif->link_speed = 0;
netif->ts = 0;
netif->ifinoctets = 0;
netif->ifinucastpkts = 0;
netif->ifinnucastpkts = 0;
netif->ifindiscards = 0;
netif->ifoutoctets = 0;
netif->ifoutucastpkts = 0;
netif->ifoutnucastpkts = 0;
netif->ifoutdiscards = 0;
#endif
netif->name[0] = IFNAME0;
netif->name[1] = IFNAME1;
netif->output = xaxiemacif_output;
netif->linkoutput = low_level_output;
#if LWIP_IPV6
netif->output_ip6 = ethip6_output;
#endif
low_level_init(netif);
return ERR_OK;
}
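/*
 * Usage sketch (application side, not part of this port): xaxiemacif_init()
 * is typically handed to netif_add() with the Ethernet MAC base address as
 * the state argument. XPAR_AXIETHERNET_0_BASEADDR below is a placeholder for
 * the xparameters.h macro generated for the actual design:
 *
 *   struct netif server_netif;
 *   netif_add(&server_netif, &ipaddr, &netmask, &gw,
 *             (void *)XPAR_AXIETHERNET_0_BASEADDR,
 *             xaxiemacif_init, tcpip_input);
 */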

View File

@@ -0,0 +1,994 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#include "lwip/sys.h"
#endif
#include "lwip/stats.h"
#include "lwip/inet_chksum.h"
#include "netif/xadapter.h"
#include "netif/xaxiemacif.h"
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#else
#include "xintc_l.h"
#endif
#else
#include "xinterrupt_wrap.h"
#ifndef XCACHE_FLUSH_DCACHE_RANGE
#define XCACHE_FLUSH_DCACHE_RANGE(data, length) \
Xil_DCacheFlushRange((UINTPTR)data, length)
#endif
#ifndef XCACHE_INVALIDATE_DCACHE_RANGE
#define XCACHE_INVALIDATE_DCACHE_RANGE(data, length) \
Xil_DCacheInvalidateRange((u32)data, length)
#endif
#endif
#include "xstatus.h"
#include "xlwipconfig.h"
#include "xparameters.h"
#ifdef CONFIG_XTRACE
#include "xtrace.h"
#endif
#if defined __aarch64__ || defined (__arm__)
#include "xil_mmu.h"
#elif defined (ARMR5)
#include "xreg_cortexr5.h"
#endif
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#ifndef XCACHE_FLUSH_DCACHE_RANGE
#define XCACHE_FLUSH_DCACHE_RANGE(data, length) \
Xil_DCacheFlushRange((UINTPTR)data, length)
#endif
#ifndef XCACHE_INVALIDATE_DCACHE_RANGE
#define XCACHE_INVALIDATE_DCACHE_RANGE(data, length) \
Xil_DCacheInvalidateRange((u32)data, length)
#endif
#endif
/* Byte alignment of BDs */
#define BD_ALIGNMENT (XAXIDMA_BD_MINIMUM_ALIGNMENT*2)
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Function Prototypes *********************************************/
/*
* Function prototypes of the functions used for registering Fast
* Interrupt Handlers
*/
static void axidma_sendfast_handler(void) __attribute__ ((fast_interrupt));
static void axidma_recvfast_handler(void) __attribute__ ((fast_interrupt));
static void xaxiemac_errorfast_handler(void) __attribute__ ((fast_interrupt));
/**************** Variable Declarations **************************************/
/** Variables for Fast Interrupt handlers ***/
struct xemac_s *xemac_fast;
xaxiemacif_s *xaxiemacif_fast;
#endif
#if !NO_SYS
extern u32 xInsideISR;
#endif
#define BD_SIZE_2MB 0x200000
#define BD_SIZE_1MB 0x100000
#define BD_SIZE_64KB 0x10000
#if defined (__aarch64__)
u8_t bd_space[BD_SIZE_2MB] __attribute__ ((aligned (BD_SIZE_2MB)));
#elif defined (__arm__) || defined (ARMR5)
u8_t bd_space[BD_SIZE_1MB] __attribute__ ((aligned (BD_SIZE_1MB)));
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
volatile u32_t notifyinfo[XLWIP_CONFIG_N_TX_DESC];
#endif
#define XAxiDma_BD_TO_INDEX(ringptr, bdptr) \
(((UINTPTR)bdptr - (UINTPTR)(ringptr)->FirstBdAddr) / (ringptr)->Separation)
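/* The BD ring is a contiguous array, so a BD's index is simply its byte
 * offset from the first BD divided by the per-BD separation. */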
static inline void bd_csum_enable(XAxiDma_Bd *bd)
{
XAxiDma_BdWrite((bd), XAXIDMA_BD_USR0_OFFSET,
(XAxiDma_BdRead((bd), XAXIDMA_BD_USR0_OFFSET)
| 1));
}
static inline void bd_csum_disable(XAxiDma_Bd *bd)
{
XAxiDma_BdWrite((bd), XAXIDMA_BD_USR0_OFFSET,
(XAxiDma_BdRead((bd), XAXIDMA_BD_USR0_OFFSET)
& ~1));
}
static inline void bd_fullcsum_disable(XAxiDma_Bd *bd)
{
XAxiDma_BdWrite((bd), XAXIDMA_BD_USR0_OFFSET,
(XAxiDma_BdRead((bd), XAXIDMA_BD_USR0_OFFSET)
& ~3));
}
static inline void bd_fullcsum_enable(XAxiDma_Bd *bd)
{
XAxiDma_BdWrite((bd), XAXIDMA_BD_USR0_OFFSET,
(XAxiDma_BdRead((bd), XAXIDMA_BD_USR0_OFFSET)
| 2));
}
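/* In the BD's USR0 app word, bit 0 selects partial checksum offload and
 * bit 1 full checksum offload; the helpers above OR in 1 or 2 respectively,
 * and the disable variants mask those bits back out. */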
static inline void bd_csum_set(XAxiDma_Bd *bd, u16_t tx_csbegin, u16_t tx_csinsert,
u16_t tx_csinit)
{
u32_t app1;
bd_csum_enable(bd);
/* write start offset and insert offset into BD */
app1 = ((u32_t)tx_csbegin << 16) | (u32_t) tx_csinsert;
XAxiDma_BdWrite(bd, XAXIDMA_BD_USR1_OFFSET, app1);
/* insert init value */
XAxiDma_BdWrite(bd, XAXIDMA_BD_USR2_OFFSET, tx_csinit);
}
static inline u16_t extract_packet_len(XAxiDma_Bd *rxbd) {
u16_t packet_len = XAxiDma_BdRead(rxbd, XAXIDMA_BD_USR4_OFFSET) & 0x3fff;
return packet_len;
}
static inline u16_t extract_csum(XAxiDma_Bd *rxbd) {
u16_t csum = XAxiDma_BdRead(rxbd, XAXIDMA_BD_USR3_OFFSET) & 0xffff;
return csum;
}
static inline u32_t csum_sub(u32_t csum, u16_t v)
{
csum += (u32_t)v;
return csum + (csum < (u32_t)v);
}
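/* Note on csum_sub(): in one's-complement arithmetic, subtracting v is the
 * same as adding ~v with end-around carry, which is why the callers below
 * pass the complement of the value they want to remove. Worked example:
 * csum_sub(0x0001, 0xFFFE) = 0xFFFF with no carry, i.e. 0x0001 - 0x0001,
 * the one's-complement negative zero. */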
/*
* compare if the h/w computed checksum (stored in the rxbd)
* equals the TCP checksum value in the packet
*/
s32_t is_checksum_valid(XAxiDma_Bd *rxbd, struct pbuf *p) {
struct ethip_hdr *ehdr = p->payload;
u8_t proto = IPH_PROTO(&ehdr->ip);
/* check if it is a TCP packet */
if (htons(ehdr->eth.type) == ETHTYPE_IP && proto == IP_PROTO_TCP) {
u32_t iphdr_len;
u16_t csum_in_rxbd, pseudo_csum, iphdr_csum, padding_csum;
u16_t tcp_payload_offset;
u32_t computed_csum;
u16_t padding_len, tcp_payload_len, packet_len;
u16_t csum;
/* determine length of IP header */
iphdr_len = (IPH_HL(&ehdr->ip) * 4);
tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
tcp_payload_len = htons(IPH_LEN(&ehdr->ip)) - IPH_HL(&ehdr->ip) * 4;
packet_len = extract_packet_len(rxbd);
padding_len = packet_len - tcp_payload_offset - tcp_payload_len;
csum_in_rxbd = extract_csum(rxbd);
pseudo_csum = htons(inet_chksum_pseudo(NULL,
proto, tcp_payload_len, (ip_addr_t *)&ehdr->ip.src,
(ip_addr_t *)&ehdr->ip.dest));
/* xps_ll_temac computes the checksum of the packet starting at byte 14
* we need to subtract the values of the ethernet & IP headers
*/
iphdr_csum = inet_chksum(p->payload + 14, tcp_payload_offset - 14);
/* compute csum of padding bytes, if any */
padding_csum = inet_chksum(p->payload + p->tot_len - padding_len,
padding_len);
/* get the h/w checksum value */
computed_csum = (u32_t)csum_in_rxbd;
/* remove the effect of csumming the iphdr */
computed_csum = csum_sub(computed_csum, ~iphdr_csum);
/* add in the pseudo csum */
computed_csum = csum_sub(computed_csum, ~pseudo_csum);
/* remove any padding effect */
computed_csum = csum_sub(computed_csum, ~padding_csum);
/* normalize computed csum */
while (computed_csum >> 16) {
computed_csum = (computed_csum & 0xffff) + (computed_csum >> 16);
}
/* convert to 16 bits and take 1's complement */
csum = (u16_t)computed_csum;
csum = ~csum;
/* chksum is valid if: computed csum over the packet is 0 */
return !csum;
} else {
/* just say yes to all other packets */
/* the upper layers in the stack will compute and verify the checksum */
return 1;
}
}
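/* Summary of the reconstruction above: the hardware checksums the frame from
 * byte 14 onward, so the IP header and any trailer padding are cancelled out
 * and the pseudo-header contribution is mixed in, all in one's-complement
 * arithmetic; the packet is accepted when the folded 16-bit result
 * complements to zero, as RFC 1071 prescribes. */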
static inline void *alloc_bdspace(int n_desc)
{
int space = XAxiDma_BdRingMemCalc(BD_ALIGNMENT, n_desc);
int padding = BD_ALIGNMENT*2;
void *unaligned_mem = mem_malloc(space + padding*4);
void *aligned_mem;
if (!unaligned_mem)
	return NULL;
aligned_mem =
	(void *)(((UINTPTR)(unaligned_mem + BD_ALIGNMENT)) & ~(BD_ALIGNMENT - 1));
#if DEBUG
assert(aligned_mem > unaligned_mem);
assert(aligned_mem + space < unaligned_mem + space + padding);
#endif
return aligned_mem;
}
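/* The rounding above is the usual align-up trick: adding BD_ALIGNMENT and
 * masking with ~(BD_ALIGNMENT - 1) yields the first aligned address at or
 * past the raw pointer. For example, with BD_ALIGNMENT = 0x80, a raw pointer
 * of 0x1004 becomes (0x1004 + 0x80) & ~0x7F = 0x1080. */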
static void axidma_send_handler(void *arg)
{
unsigned irq_status;
struct xemac_s *xemac;
xaxiemacif_s *xaxiemacif;
XAxiDma_BdRing *txringptr;
#if !NO_SYS
xInsideISR++;
#endif
xemac = (struct xemac_s *)(arg);
xaxiemacif = (xaxiemacif_s *)(xemac->state);
txringptr = XAxiDma_GetTxRing(&xaxiemacif->axidma);
XAxiDma_BdRingIntDisable(txringptr, XAXIDMA_IRQ_ALL_MASK);
/* Read pending interrupts */
irq_status = XAxiDma_BdRingGetIrq(txringptr);
/* Acknowledge pending interrupts */
XAxiDma_BdRingAckIrq(txringptr, irq_status);
/* If error interrupt is asserted, raise error flag, reset the
* hardware to recover from the error, and return with no further
* processing.
*/
if (irq_status & XAXIDMA_IRQ_ERROR_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: axidma error interrupt is asserted\r\n",
__FUNCTION__));
XAxiDma_Reset(&xaxiemacif->axidma);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
/* If Transmit done interrupt is asserted, process completed BD's */
if (irq_status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)) {
process_sent_bds(txringptr);
}
XAxiDma_BdRingIntEnable(txringptr, XAXIDMA_IRQ_ALL_MASK);
#if !NO_SYS
xInsideISR--;
#endif
}
static void setup_rx_bds(XAxiDma_BdRing *rxring)
{
XAxiDma_Bd *rxbd;
s32_t n_bds;
XStatus status;
struct pbuf *p;
u32 bdsts;
n_bds = XAxiDma_BdRingGetFreeCnt(rxring);
while (n_bds > 0) {
n_bds--;
#ifdef USE_JUMBO_FRAMES
p = pbuf_alloc(PBUF_RAW, XAE_MAX_JUMBO_FRAME_SIZE, PBUF_POOL);
#else
p = pbuf_alloc(PBUF_RAW, XAE_MAX_FRAME_SIZE, PBUF_POOL);
#endif
if (!p) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
xil_printf("unable to alloc pbuf in recv_handler\r\n");
return;
}
status = XAxiDma_BdRingAlloc(rxring, 1, &rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
pbuf_free(p);
return;
}
/* Setup the BD. */
XAxiDma_BdSetBufAddr(rxbd, (UINTPTR)p->payload);
/* Clear everything but the COMPLETE bit, which is cleared when
* committed to hardware.
*/
bdsts = XAxiDma_BdGetSts(rxbd);
bdsts &= XAXIDMA_BD_STS_COMPLETE_MASK;
XAxiDma_BdWrite(rxbd, XAXIDMA_BD_STS_OFFSET, bdsts);
XAxiDma_BdSetLength(rxbd, p->len, rxring->MaxTransferLen);
XAxiDma_BdSetCtrl(rxbd, 0);
XAxiDma_BdSetId(rxbd, p);
#if !defined (__MICROBLAZE__) && !defined (__riscv)
dsb();
#endif
#ifdef USE_JUMBO_FRAMES
XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_JUMBO_FRAME_SIZE);
#else
XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_FRAME_SIZE);
#endif
#if !defined(__aarch64__)
XCACHE_FLUSH_DCACHE_RANGE(rxbd, sizeof *rxbd);
#endif
/* Enqueue to HW */
status = XAxiDma_BdRingToHw(rxring, 1, rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
if (status == XST_DMA_SG_LIST_ERROR) {
LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XAxiDma_BdRingAlloc()\r\n"));
}
else {
LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));
}
pbuf_free(p);
XAxiDma_BdRingUnAlloc(rxring, 1, rxbd);
return;
}
}
}
static void axidma_recv_handler(void *arg)
{
struct pbuf *p;
u32 irq_status, i, timeOut;
XAxiDma_Bd *rxbd, *rxbdset;
struct xemac_s *xemac;
xaxiemacif_s *xaxiemacif;
XAxiDma_BdRing *rxring;
#if !NO_SYS
xInsideISR++;
#endif
xemac = (struct xemac_s *)(arg);
xaxiemacif = (xaxiemacif_s *)(xemac->state);
rxring = XAxiDma_GetRxRing(&xaxiemacif->axidma);
XAxiDma_BdRingIntDisable(rxring, XAXIDMA_IRQ_ALL_MASK);
/* Read pending interrupts */
irq_status = XAxiDma_BdRingGetIrq(rxring);
/* Acknowledge pending interrupts */
XAxiDma_BdRingAckIrq(rxring, irq_status);
/* If error interrupt is asserted, raise error flag, reset the
* hardware to recover from the error, and return with no further
* processing.
*/
if ((irq_status & XAXIDMA_IRQ_ERROR_MASK)) {
setup_rx_bds(rxring);
LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: axidma error interrupt is asserted\r\n",
__FUNCTION__));
XAxiDma_Reset(&xaxiemacif->axidma);
timeOut = 10000;
while (timeOut) {
if (XAxiDma_ResetIsDone(&xaxiemacif->axidma)) {
break;
}
timeOut -= 1;
}
XAxiDma_BdRingIntEnable(rxring, XAXIDMA_IRQ_ALL_MASK);
XAxiDma_Resume(&xaxiemacif->axidma);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
/* If Reception done interrupt is asserted, call RX call back function
* to handle the processed BDs and then raise the according flag.
*/
if (irq_status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)) {
u32 bd_processed;
u32 rx_bytes;
bd_processed = XAxiDma_BdRingFromHw(rxring, XAXIDMA_ALL_BDS, &rxbdset);
for (i = 0, rxbd = rxbdset; i < bd_processed; i++) {
p = (struct pbuf *)(UINTPTR)XAxiDma_BdGetId(rxbd);
/* Adjust the buffer size to the actual number of bytes received.*/
rx_bytes = extract_packet_len(rxbd);
pbuf_realloc(p, rx_bytes);
#if defined(__aarch64__)
#ifdef USE_JUMBO_FRAMES
XCACHE_INVALIDATE_DCACHE_RANGE(p->payload,
XAE_MAX_JUMBO_FRAME_SIZE);
#else
XCACHE_INVALIDATE_DCACHE_RANGE(p->payload, XAE_MAX_FRAME_SIZE);
#endif
#endif
#if LWIP_PARTIAL_CSUM_OFFLOAD_RX==1
/* Verify for partial checksum offload case */
if (!is_checksum_valid(rxbd, p)) {
LWIP_DEBUGF(NETIF_DEBUG, ("Incorrect csum as calculated by the hw\r\n"));
}
#endif
/* store it in the receive queue,
* where it'll be processed by a different handler
*/
if (pq_enqueue(xaxiemacif->recv_q, (void*)p) < 0) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
pbuf_free(p);
}
rxbd = (XAxiDma_Bd *)XAxiDma_BdRingNext(rxring, rxbd);
}
/* free up the BD's */
XAxiDma_BdRingFree(rxring, bd_processed, rxbdset);
/* refill the ring: setup_rx_bds() allocates as many BDs as
 * XAxiDma_BdRingGetFreeCnt() reports free and hands them back to hardware */
setup_rx_bds(rxring);
}
XAxiDma_BdRingIntEnable(rxring, XAXIDMA_IRQ_ALL_MASK);
#if !NO_SYS
sys_sem_signal(&xemac->sem_rx_data_available);
xInsideISR--;
#endif
}
s32_t xaxiemac_is_tx_space_available(xaxiemacif_s *emac)
{
XAxiDma_BdRing *txring;
txring = XAxiDma_GetTxRing(&emac->axidma);
/* tx space is available as long as there are valid BD's */
return XAxiDma_BdRingGetFreeCnt(txring);
}
s32_t process_sent_bds(XAxiDma_BdRing *txring)
{
XAxiDma_Bd *txbdset, *txbd;
int n_bds, i;
u32_t bdindex;
/* obtain a list of processed BD's */
n_bds = XAxiDma_BdRingFromHw(txring, XAXIDMA_ALL_BDS, &txbdset);
if (n_bds == 0) {
return XST_FAILURE;
}
/* free the pbuf associated with each BD */
for (i = 0, txbd = txbdset; i < n_bds; i++) {
bdindex = XAxiDma_BD_TO_INDEX(txring, txbd);
struct pbuf *p = (struct pbuf *)(UINTPTR)XAxiDma_BdGetId(txbd);
pbuf_free(p);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
notifyinfo[bdindex] = 0;
#endif
txbd = (XAxiDma_Bd *)XAxiDma_BdRingNext(txring, txbd);
}
/* free the processed BD's */
return (XAxiDma_BdRingFree(txring, n_bds, txbdset));
}
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
XStatus axidma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p,
u32_t block_till_tx_complete, u32_t *to_block_index)
#else
XStatus axidma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p)
#endif
{
struct pbuf *q;
s32_t n_pbufs;
XAxiDma_Bd *txbdset, *txbd, *last_txbd = NULL;
XStatus status;
XAxiDma_BdRing *txring;
u32_t max_frame_size;
u32_t bdindex = 0;
#ifdef USE_JUMBO_FRAMES
max_frame_size = XAE_MAX_JUMBO_FRAME_SIZE - 18;
#else
max_frame_size = XAE_MAX_FRAME_SIZE - 18;
#endif
txring = XAxiDma_GetTxRing(&xaxiemacif->axidma);
/* first count the number of pbufs */
for (q = p, n_pbufs = 0; q != NULL; q = q->next)
n_pbufs++;
/* obtain as many BD's */
status = XAxiDma_BdRingAlloc(txring, n_pbufs, &txbdset);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
return ERR_IF;
}
for(q = p, txbd = txbdset; q != NULL; q = q->next) {
bdindex = XAxiDma_BD_TO_INDEX(txring, txbd);
/* Send the data from the pbuf to the interface, one pbuf at a
* time. The size of the data in each pbuf is kept in the ->len
* variable.
*/
XAxiDma_BdSetBufAddr(txbd, (UINTPTR)q->payload);
if (q->len > max_frame_size) {
XAxiDma_BdSetLength(txbd, max_frame_size,
txring->MaxTransferLen);
}
else {
XAxiDma_BdSetLength(txbd, q->len, txring->MaxTransferLen);
}
XAxiDma_BdSetId(txbd, (void *)q);
XAxiDma_BdSetCtrl(txbd, 0);
XCACHE_FLUSH_DCACHE_RANGE(q->payload, q->len);
pbuf_ref(q);
last_txbd = txbd;
txbd = (XAxiDma_Bd *)XAxiDma_BdRingNext(txring, txbd);
}
if (n_pbufs == 1) {
XAxiDma_BdSetCtrl(txbdset, XAXIDMA_BD_CTRL_TXSOF_MASK
| XAXIDMA_BD_CTRL_TXEOF_MASK);
} else {
/* in the first packet, set the SOP */
XAxiDma_BdSetCtrl(txbdset, XAXIDMA_BD_CTRL_TXSOF_MASK);
/* in the last packet, set the EOP */
XAxiDma_BdSetCtrl(last_txbd, XAXIDMA_BD_CTRL_TXEOF_MASK);
}
#if LWIP_FULL_CSUM_OFFLOAD_TX==1
bd_fullcsum_disable(txbdset);
if (p->len > sizeof(struct ethip_hdr)) {
bd_fullcsum_enable(txbdset);
}
#endif
#if LWIP_PARTIAL_CSUM_OFFLOAD_TX==1
bd_csum_disable(txbdset);
if (p->len > sizeof(struct ethip_hdr)) {
struct ethip_hdr *ehdr = p->payload;
u8_t proto = IPH_PROTO(&ehdr->ip);
/* check if it is a TCP packet */
if (htons(ehdr->eth.type) == ETHTYPE_IP && proto ==
IP_PROTO_TCP) {
u32_t iphdr_len, csum_insert_offset;
u16_t tcp_len; /* TCP header length + data length in bytes */
u16_t csum_init = 0;
u16_t tcp_payload_offset;
/* determine length of IP header */
iphdr_len = (IPH_HL(&ehdr->ip) * 4);
tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
tcp_len = p->tot_len - tcp_payload_offset;
/* insert checksum at offset 16 for TCP, 6 for UDP */
if (proto == IP_PROTO_TCP)
csum_insert_offset = tcp_payload_offset + 16;
else if (proto == IP_PROTO_UDP)
csum_insert_offset = tcp_payload_offset + 6;
else
csum_insert_offset = 0;
/* compute pseudo header checksum value */
csum_init = inet_chksum_pseudo(NULL, proto, tcp_len,
	(ip_addr_t *)&ehdr->ip.src,
	(ip_addr_t *)&ehdr->ip.dest);
/* init buffer descriptor */
bd_csum_set(txbdset, tcp_payload_offset,
csum_insert_offset, htons(~csum_init));
}
}
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (block_till_tx_complete == 1) {
notifyinfo[bdindex] = 1;
*to_block_index = bdindex;
}
#endif
/* enq to h/w */
return XAxiDma_BdRingToHw(txring, n_pbufs, txbdset);
}
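/* Ownership note: each pbuf queued above takes an extra reference via
 * pbuf_ref(), so the stack may drop its own reference as soon as linkoutput
 * returns; the matching pbuf_free() happens in process_sent_bds() once the
 * DMA engine reports the BD complete. */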
XStatus init_axi_dma(struct xemac_s *xemac)
{
XAxiDma_Config *dmaconfig;
XAxiDma_Bd bdtemplate;
XAxiDma_BdRing *rxringptr, *txringptr;
XAxiDma_Bd *rxbd;
struct pbuf *p;
XStatus status;
u32_t i;
u32_t bd_space_index = 0;
UINTPTR baseaddr;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
#if XPAR_INTC_0_HAS_FAST == 1
xaxiemacif_fast = xaxiemacif;
xemac_fast = xemac;
#endif
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
/* FIXME: On ZyqnMP Multiple Axi Ethernet are not supported */
#if defined (__aarch64__)
Xil_SetTlbAttributes((u64)bd_space, NORM_NONCACHE | INNER_SHAREABLE);
#elif defined (ARMR5)
Xil_SetTlbAttributes((s32_t)bd_space, STRONG_ORDERD_SHARED | PRIV_RW_USER_RW);
#elif defined (__arm__)
Xil_SetTlbAttributes((s32_t)bd_space, DEVICE_MEMORY);
#endif
#if defined (__MICROBLAZE__) || defined (__riscv)
xaxiemacif->rx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_RX_DESC);
xaxiemacif->tx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_TX_DESC);
#else
xaxiemacif->rx_bdspace = (void *)(UINTPTR)&(bd_space[bd_space_index]);
bd_space_index += BD_SIZE_64KB;
xaxiemacif->tx_bdspace = (void *)(UINTPTR)&(bd_space[bd_space_index]);
#endif
LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n",
xaxiemacif->rx_bdspace));
LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n",
xaxiemacif->tx_bdspace));
if (!xaxiemacif->rx_bdspace || !xaxiemacif->tx_bdspace) {
xil_printf("%s@%d: Error: Unable to allocate memory for RX buffer descriptors",
__FILE__, __LINE__);
return ERR_IF;
}
/* initialize DMA */
#ifndef SDT
baseaddr = xaxiemacif->axi_ethernet.Config.AxiDevBaseAddress;
dmaconfig = XAxiDma_LookupConfigBaseAddr(baseaddr);
#else
baseaddr = xaxiemacif->axi_ethernet.AxiDevBaseAddress;
dmaconfig = XAxiDma_LookupConfig(baseaddr);
#endif
XAxiDma_CfgInitialize(&xaxiemacif->axidma, dmaconfig);
rxringptr = XAxiDma_GetRxRing(&xaxiemacif->axidma);
txringptr = XAxiDma_GetTxRing(&xaxiemacif->axidma);
LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));
/* Setup RxBD space.
* Setup a BD template for the Rx channel. This template will be copied to
* every RxBD. We will not have to explicitly set these again.
*/
XAxiDma_BdClear(&bdtemplate);
/* Create the RxBD ring */
status = XAxiDma_BdRingCreate(rxringptr, (UINTPTR) xaxiemacif->rx_bdspace,
(UINTPTR) xaxiemacif->rx_bdspace, BD_ALIGNMENT,
XLWIP_CONFIG_N_RX_DESC);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
return ERR_IF;
}
XAxiDma_BdClear(&bdtemplate);
status = XAxiDma_BdRingClone(rxringptr, &bdtemplate);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
return ERR_IF;
}
/* Create the TxBD ring */
status = XAxiDma_BdRingCreate(txringptr, (UINTPTR) xaxiemacif->tx_bdspace,
(UINTPTR) xaxiemacif->tx_bdspace, BD_ALIGNMENT,
XLWIP_CONFIG_N_TX_DESC);
if (status != XST_SUCCESS) {
return ERR_IF;
}
/* We reuse the bd template, as the same one will work for both rx and tx. */
status = XAxiDma_BdRingClone(txringptr, &bdtemplate);
if (status != XST_SUCCESS) {
return ERR_IF;
}
/* Allocate RX descriptors, 1 RxBD at a time.*/
for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
status = XAxiDma_BdRingAlloc(rxringptr, 1, &rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("init_axi_dma: Error allocating RxBD\r\n"));
return ERR_IF;
}
#ifdef USE_JUMBO_FRAMES
p = pbuf_alloc(PBUF_RAW, XAE_MAX_JUMBO_FRAME_SIZE, PBUF_POOL);
#else
p = pbuf_alloc(PBUF_RAW, XAE_MAX_FRAME_SIZE, PBUF_POOL);
#endif
if (!p) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in recv_handler\r\n"));
return ERR_IF;
}
/* Setup the BD. The BD template used in the call to
* XAxiEthernet_SgSetSpace() set the "last" field of all RxBDs.
* Therefore we are not required to issue a XAxiDma_Bd_SetLast(rxbd)
* here.
*/
XAxiDma_BdSetBufAddr(rxbd, (UINTPTR)p->payload);
XAxiDma_BdSetLength(rxbd, p->len, rxringptr->MaxTransferLen);
XAxiDma_BdSetCtrl(rxbd, 0);
XAxiDma_BdSetId(rxbd, p);
#ifdef USE_JUMBO_FRAMES
XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_JUMBO_FRAME_SIZE);
#else
XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_FRAME_SIZE);
#endif
#if !defined(__aarch64__)
XCACHE_FLUSH_DCACHE_RANGE(rxbd, sizeof *rxbd);
#endif
/* Enqueue to HW */
status = XAxiDma_BdRingToHw(rxringptr, 1, rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
return ERR_IF;
}
}
status = XAxiDma_BdRingSetCoalesce(txringptr, XLWIP_CONFIG_N_TX_COALESCE,
0x1);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
return ERR_IF;
}
status = XAxiDma_BdRingSetCoalesce(rxringptr, XLWIP_CONFIG_N_RX_COALESCE,
0x1);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
return ERR_IF;
}
/* start DMA */
status = XAxiDma_BdRingStart(txringptr);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error: failed to start TX BD ring\r\n"));
return ERR_IF;
}
status = XAxiDma_BdRingStart(rxringptr);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error: failed to start RX BD ring\r\n"));
return ERR_IF;
}
/* enable DMA interrupts */
XAxiDma_BdRingIntEnable(txringptr, XAXIDMA_IRQ_ALL_MASK);
XAxiDma_BdRingIntEnable(rxringptr, XAXIDMA_IRQ_ALL_MASK);
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.TemacIntr,
(Xil_ExceptionHandler)xaxiemac_error_handler,
&xaxiemacif->axi_ethernet);
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
(Xil_ExceptionHandler)axidma_send_handler,
xemac);
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
(Xil_ExceptionHandler)axidma_recv_handler,
xemac);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr,
AXIETH_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
AXIDMA_TX_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
AXIDMA_RX_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr);
#else
/* The bare-metal (NO_SYS) and RTOS registration paths are identical, so no
 * #if NO_SYS distinction is needed here. */
#if XPAR_INTC_0_HAS_FAST == 1
/* Register axiethernet interrupts with the interrupt controller as Fast
   Interrupts */
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.TemacIntr,
	(XFastInterruptHandler)xaxiemac_errorfast_handler);
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
	(XFastInterruptHandler)axidma_sendfast_handler);
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
	(XFastInterruptHandler)axidma_recvfast_handler);
#else
/* Register axiethernet interrupt with interrupt controller */
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.TemacIntr,
	(XInterruptHandler)xaxiemac_error_handler,
	&xaxiemacif->axi_ethernet);
/* connect & enable DMA interrupts */
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
	(XInterruptHandler)axidma_send_handler,
	xemac);
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
	(XInterruptHandler)axidma_recv_handler,
	xemac);
#endif
/* Enable EMAC interrupts in the interrupt controller */
do {
	/* read current interrupt enable mask */
	unsigned int cur_mask = XIntc_In32(xtopologyp->intc_baseaddr +
		XIN_IER_OFFSET);
	/* form new mask enabling AXIDMA & axiethernet interrupts */
	cur_mask = cur_mask
		| (1 << xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr)
		| (1 << xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr)
		| (1 << xaxiemacif->axi_ethernet.Config.TemacIntr);
	/* set new mask */
	XIntc_EnableIntr(xtopologyp->intc_baseaddr, cur_mask);
} while (0);
#endif
#else
XSetupInterruptSystem(&xaxiemacif->axi_ethernet, &xaxiemac_error_handler,
xaxiemacif->axi_ethernet.Config.IntrId,
xaxiemacif->axi_ethernet.Config.IntrParent,
XINTERRUPT_DEFAULT_PRIORITY);
XSetupInterruptSystem(xemac, &axidma_send_handler,
dmaconfig->IntrId[0],
dmaconfig->IntrParent,
XINTERRUPT_DEFAULT_PRIORITY);
XSetupInterruptSystem(xemac, &axidma_recv_handler,
dmaconfig->IntrId[1],
dmaconfig->IntrParent,
XINTERRUPT_DEFAULT_PRIORITY);
#endif
return 0;
}
#ifndef SDT
#if XPAR_INTC_0_HAS_FAST == 1
/****************************** Fast receive Handler *************************/
static void axidma_recvfast_handler(void)
{
axidma_recv_handler((void *)xemac_fast);
}
/****************************** Fast Send Handler ****************************/
static void axidma_sendfast_handler(void)
{
axidma_send_handler((void *)xemac_fast);
}
/****************************** Fast Error Handler ***************************/
static void xaxiemac_errorfast_handler(void)
{
xaxiemac_error_handler(&xaxiemacif_fast->axi_ethernet);
}
#endif
#endif

View File

@@ -0,0 +1,366 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#include "lwip/sys.h"
#endif
#include "lwip/stats.h"
#include "netif/xadapter.h"
#include "netif/xaxiemacif.h"
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#else
#include "xintc_l.h"
#endif
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#define AXIFIFO_INTR_PRIORITY_SET_IN_GIC 0xA0
#define AXIETH_INTR_PRIORITY_SET_IN_GIC 0xA0
#define TRIG_TYPE_RISING_EDGE_SENSITIVE 0x3
#define INTC_DIST_BASE_ADDR XPAR_SCUGIC_DIST_BASEADDR
#endif
#include "xstatus.h"
#include "xaxiemacif_fifo.h"
#include "xlwipconfig.h"
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Function Prototypes *********************************************/
/*
* Function prototypes of the functions used for registering Fast
* Interrupt Handlers
*/
static void xllfifo_fastintr_handler(void) __attribute__ ((fast_interrupt));
static void xaxiemac_fasterror_handler(void) __attribute__ ((fast_interrupt));
/**************** Variable Declarations **************************************/
/** Variables for Fast Interrupt handlers ***/
struct xemac_s *xemac_fast;
xaxiemacif_s *xaxiemacif_fast;
#endif
#if !NO_SYS
extern u32 xInsideISR;
#endif
int xaxiemac_is_tx_space_available(xaxiemacif_s *emac)
{
return ((XLlFifo_TxVacancy(&emac->axififo) * 4) > XAE_MAX_FRAME_SIZE);
}
static void
xllfifo_recv_handler(struct xemac_s *xemac)
{
u32_t frame_length;
struct pbuf *p;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
XLlFifo *llfifo = &xaxiemacif->axififo;
/* While there is data in the fifo ... */
while (XLlFifo_RxOccupancy(llfifo)) {
/* find packet length */
frame_length = XLlFifo_RxGetLen(llfifo);
/* allocate a pbuf */
p = pbuf_alloc(PBUF_RAW, frame_length, PBUF_POOL);
if (!p) {
char tmp_frame[XAE_MAX_FRAME_SIZE];
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
/* receive and drop packet to keep data & len registers in sync */
XLlFifo_Read(llfifo, tmp_frame, frame_length);
continue;
}
/* receive packet */
XLlFifo_Read(llfifo, p->payload, frame_length);
#if ETH_PAD_SIZE
frame_length += ETH_PAD_SIZE; /* allow room for Ethernet padding */
#endif
/* store it in the receive queue, where it'll be processed by xemacif input thread */
if (pq_enqueue(xaxiemacif->recv_q, (void*)p) < 0) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
pbuf_free(p);
continue;
}
#if !NO_SYS
sys_sem_signal(&xemac->sem_rx_data_available);
#endif
#if LINK_STATS
lwip_stats.link.recv++;
#endif
}
}
static void
fifo_error_handler(xaxiemacif_s *xaxiemacif, u32_t pending_intr)
{
XLlFifo *llfifo = &xaxiemacif->axififo;
if (pending_intr & XLLF_INT_RPURE_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Rx under-read error"));
}
if (pending_intr & XLLF_INT_RPORE_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Rx over-read error"));
}
if (pending_intr & XLLF_INT_RPUE_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Rx fifo empty"));
}
if (pending_intr & XLLF_INT_TPOE_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Tx fifo overrun"));
}
if (pending_intr & XLLF_INT_TSE_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Tx length mismatch"));
}
/* Reset the tx or rx side of the fifo as needed */
if (pending_intr & XLLF_INT_RXERROR_MASK) {
XLlFifo_IntClear(llfifo, XLLF_INT_RRC_MASK);
XLlFifo_RxReset(llfifo);
}
if (pending_intr & XLLF_INT_TXERROR_MASK) {
XLlFifo_IntClear(llfifo, XLLF_INT_TRC_MASK);
XLlFifo_TxReset(llfifo);
}
}
static void
xllfifo_intr_handler(struct xemac_s *xemac)
{
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
XLlFifo *llfifo = &xaxiemacif->axififo;
u32_t pending_fifo_intr = XLlFifo_IntPending(llfifo);
#if !NO_SYS
xInsideISR++;
#endif
while (pending_fifo_intr) {
if (pending_fifo_intr & XLLF_INT_RC_MASK) {
/* receive interrupt */
XLlFifo_IntClear(llfifo, XLLF_INT_RC_MASK);
xllfifo_recv_handler(xemac);
} else if (pending_fifo_intr & XLLF_INT_TC_MASK) {
/* tx intr */
XLlFifo_IntClear(llfifo, XLLF_INT_TC_MASK);
} else {
XLlFifo_IntClear(llfifo, XLLF_INT_ALL_MASK &
~(XLLF_INT_RC_MASK |
XLLF_INT_TC_MASK));
fifo_error_handler(xaxiemacif, pending_fifo_intr);
}
pending_fifo_intr = XLlFifo_IntPending(llfifo);
}
#if !NO_SYS
xInsideISR--;
#endif
}
XStatus init_axi_fifo(struct xemac_s *xemac)
{
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
#if XPAR_INTC_0_HAS_FAST == 1
xaxiemacif_fast = xaxiemacif;
xemac_fast = xemac;
#endif
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
/* initialize ll fifo */
XLlFifo_Initialize(&xaxiemacif->axififo,
XAxiEthernet_AxiDevBaseAddress(&xaxiemacif->axi_ethernet));
/* Clear any pending FIFO interrupts */
XLlFifo_IntClear(&xaxiemacif->axififo, XLLF_INT_ALL_MASK);
/* enable fifo interrupts */
XLlFifo_IntEnable(&xaxiemacif->axififo, XLLF_INT_ALL_MASK);
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.TemacIntr,
(XInterruptHandler)xaxiemac_error_handler,
&xaxiemacif->axi_ethernet);
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
(XInterruptHandler)xllfifo_intr_handler,
xemac);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr,
AXIETH_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
AXIFIFO_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiFifoIntr);
#else
/* The bare-metal (NO_SYS) and RTOS registration paths are identical, so no
 * #if NO_SYS distinction is needed here. */
#if XPAR_INTC_0_HAS_FAST == 1
/* Register temac interrupt with interrupt controller */
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.TemacIntr,
	(XFastInterruptHandler)xaxiemac_fasterror_handler);
/* connect & enable FIFO interrupt */
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
	(XFastInterruptHandler)xllfifo_fastintr_handler);
#else
/* Register temac interrupt with interrupt controller */
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.TemacIntr,
	(XInterruptHandler)xaxiemac_error_handler,
	&xaxiemacif->axi_ethernet);
/* connect & enable FIFO interrupt */
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
	xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
	(XInterruptHandler)xllfifo_intr_handler,
	xemac);
#endif
/* Enable EMAC interrupts in the interrupt controller */
do {
	/* read current interrupt enable mask */
	unsigned int cur_mask = XIntc_In32(xtopologyp->intc_baseaddr + XIN_IER_OFFSET);
	/* form new mask enabling FIFO & temac interrupts */
	cur_mask = cur_mask
		| (1 << xaxiemacif->axi_ethernet.Config.AxiFifoIntr)
		| (1 << xaxiemacif->axi_ethernet.Config.TemacIntr);
	/* set new mask */
	XIntc_EnableIntr(xtopologyp->intc_baseaddr, cur_mask);
} while (0);
#endif
return 0;
}
XStatus axififo_send(xaxiemacif_s *xaxiemacif, struct pbuf *p)
{
XLlFifo *llfifo = &xaxiemacif->axififo;
u32_t l = 0;
struct pbuf *q;
for(q = p; q != NULL; q = q->next) {
/* write frame data to FIFO */
XLlFifo_Write(llfifo, q->payload, q->len);
l += q->len;
}
/* initiate transmit */
XLlFifo_TxSetLen(llfifo, l);
return 0;
}
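/* Note: XLlFifo_Write() only stages bytes in the transmit FIFO; nothing goes
 * on the wire until XLlFifo_TxSetLen() writes the total frame length, which
 * is why the pbuf chain is streamed first and the length written once at the
 * end. */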
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Fast Error Handler ********************************************/
void xaxiemac_fasterror_handler(void)
{
xaxiemac_error_handler(&xaxiemacif_fast->axi_ethernet);
}
/********** Fast Interrupt handler *****************************************/
void xllfifo_fastintr_handler(void)
{
xllfifo_intr_handler(xemac_fast);
}
#endif

View File

@@ -0,0 +1,50 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XAXIEMACIF_FIFO_H_
#define __XAXIEMACIF_FIFO_H_
#include "xparameters.h"
#include "netif/xaxiemacif.h"
#include "xlwipconfig.h"
#ifdef __cplusplus
extern "C" {
#endif
XStatus init_axi_fifo(struct xemac_s *xemac);
XStatus axififo_send(xaxiemacif_s *xaxiemacif, struct pbuf *p);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,123 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "netif/xaxiemacif.h"
#include "lwipopts.h"
XAxiEthernet_Config *xaxiemac_lookup_config(unsigned mac_base)
{
extern XAxiEthernet_Config XAxiEthernet_ConfigTable[];
XAxiEthernet_Config *CfgPtr = NULL;
unsigned int i;
for (i = 0; i < XPAR_XAXIETHERNET_NUM_INSTANCES; i++) {
if (XAxiEthernet_ConfigTable[i].BaseAddress == mac_base) {
CfgPtr = &XAxiEthernet_ConfigTable[i];
break;
}
}
return (CfgPtr);
}
void init_axiemac(xaxiemacif_s *xaxiemac, struct netif *netif)
{
unsigned link_speed = 1000;
unsigned options;
XAxiEthernet *xaxiemacp;
xaxiemacp = &xaxiemac->axi_ethernet;
XAxiEthernet_Reset(xaxiemacp);
options = XAxiEthernet_GetOptions(xaxiemacp);
options |= XAE_FLOW_CONTROL_OPTION;
#ifdef USE_JUMBO_FRAMES
options |= XAE_JUMBO_OPTION;
#endif
options |= XAE_TRANSMITTER_ENABLE_OPTION;
options |= XAE_RECEIVER_ENABLE_OPTION;
options |= XAE_FCS_STRIP_OPTION;
options |= XAE_MULTICAST_OPTION;
XAxiEthernet_SetOptions(xaxiemacp, options);
XAxiEthernet_ClearOptions(xaxiemacp, ~options);
/* set mac address */
XAxiEthernet_SetMacAddress(xaxiemacp, (unsigned char*)(netif->hwaddr));
link_speed = phy_setup_axiemac(xaxiemacp);
XAxiEthernet_SetOperatingSpeed(xaxiemacp, link_speed);
if (link_speed == 0)
xaxiemac->eth_link_status = ETH_LINK_DOWN;
else
xaxiemac->eth_link_status = ETH_LINK_UP;
/* Setting the operating speed of the MAC needs a delay. */
{
volatile int wait;
for (wait=0; wait < 100000; wait++);
for (wait=0; wait < 100000; wait++);
}
#ifdef NOTNOW
/* in a soft temac implementation, we need to explicitly make sure that
* the RX DCM has been locked. See xps_ll_temac manual for details.
* This bit is guaranteed to be 1 for hard temac's
*/
lock_message_printed = 0;
{
	int first = 1;
	while (!(XAxiEthernet_ReadReg(xaxiemacp->Config.BaseAddress, XAE_IS_OFFSET)
		& XAE_INT_RXDCMLOCK_MASK)) {
		if (first) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Waiting for RX DCM to lock.."));
			first = 0;
			lock_message_printed = 1;
		}
	}
}
if (lock_message_printed)
	LWIP_DEBUGF(NETIF_DEBUG, ("RX DCM locked.\r\n"));
#endif
/* start the temac */
XAxiEthernet_Start(xaxiemacp);
/* enable MAC interrupts */
XAxiEthernet_IntEnable(xaxiemacp, XAE_INT_RECV_ERROR_MASK);
}
void xaxiemac_error_handler(XAxiEthernet * Temac)
{
unsigned Pending;
Pending = XAxiEthernet_IntPending(Temac);
XAxiEthernet_IntClear(Temac, Pending);
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XAXIEMACIF_HW_H_
#define __XAXIEMACIF_HW_H_
#include "netif/xaxiemacif.h"
#include "lwip/netif.h"
#ifdef __cplusplus
extern "C" {
#endif
XAxiEthernet_Config * xaxiemac_lookup_config(unsigned mac_base);
void init_axiemac(xaxiemacif_s *xaxiemacif, struct netif *netif);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,839 @@
/*
* Copyright (C) 2018 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "lwip/sys.h"
#endif
#include "lwip/stats.h"
#include "lwip/inet_chksum.h"
#include "netif/xadapter.h"
#include "netif/xaxiemacif.h"
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#endif
#include "xstatus.h"
#include "xlwipconfig.h"
#include "xparameters.h"
#if defined __aarch64__
#include "xil_mmu.h"
#endif
#if defined ARMR5
#include "xil_mpu.h"
#endif
#define PARTIAL_CSUM_ENABLE 0x00000001 /* Option for partial csum enable */
#define FULL_CSUM_ENABLE 0x00000002 /* Option for full csum enable */
#define BD_USR0_OFFSET 0 /* AXI4-Stream Control Word offset from
* the start of app user words in BD.
* Offset 0 means, Control Word 0, used
* for enabling checksum offloading.
*/
#define BD_USR1_OFFSET 1 /* AXI4-Stream Control Word offset from
* the start of app user words in BD.
* Offset means, Control Word 1, used
* for mentioning checksum begin and
* checksum insert points
*/
#define BD_USR2_OFFSET 2 /* AXI4-Stream Control Word offset from
* the start of app user words in BD.
* Offset 2 means, Control Word 2, used
* for mentioning checksum seed.
*/
#define XMCDMA_ALL_BDS 0xFFFF
#define XMCDMA_BD_LENGTH_MASK 0x007FFFFF
#define XMCDMA_COALESCEDELAY 0x1
#define RESET_TIMEOUT_COUNT 10000
#define BLOCK_SIZE_2MB 0x200000
#define BLOCK_SIZE_1MB 0x100000
#if defined (__aarch64__)
#define BD_SIZE BLOCK_SIZE_2MB
static u8_t bd_space[BD_SIZE] __attribute__ ((aligned (BLOCK_SIZE_2MB)));
#else
#define BD_SIZE BLOCK_SIZE_1MB
static u8_t bd_space[BD_SIZE] __attribute__ ((aligned (BLOCK_SIZE_1MB)));
#endif
static u8_t *bd_mem_ptr = bd_space;
#if !NO_SYS
extern u32 xInsideISR;
#endif
static inline void bd_csum_enable(XMcdma_Bd *bd)
{
XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, PARTIAL_CSUM_ENABLE);
}
static inline void bd_csum_disable(XMcdma_Bd *bd)
{
XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, ~PARTIAL_CSUM_ENABLE);
}
static inline void bd_fullcsum_disable(XMcdma_Bd *bd)
{
XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, ~FULL_CSUM_ENABLE);
}
static inline void bd_fullcsum_enable(XMcdma_Bd *bd)
{
XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, FULL_CSUM_ENABLE);
}
static inline void bd_csum_set(XMcdma_Bd *bd, u16_t tx_csbegin,
u16_t tx_csinsert, u16_t tx_csinit)
{
u32_t app1;
bd_csum_enable(bd);
/* write start offset and insert offset into BD */
app1 = ((u32_t)tx_csbegin << 16) | tx_csinsert;
XMcDma_BdSetAppWord(bd, BD_USR1_OFFSET, app1);
/* insert init value */
XMcDma_BdSetAppWord(bd, BD_USR2_OFFSET, tx_csinit);
}
static inline u32_t extract_packet_len(XMcdma_Bd *rxbd) {
return XMcDma_BdGetActualLength(rxbd, XMCDMA_BD_LENGTH_MASK);
}
static inline u16_t extract_csum(XMcdma_Bd *rxbd) {
return XMcdma_BdRead64(rxbd, XMCDMA_BD_USR3_OFFSET) & 0xffff;
}
static inline u32_t csum_sub(u32_t csum, u16_t v)
{
csum += (u32_t)v;
return csum + (csum < (u32_t)v);
}
/*
* compare if the h/w computed checksum (stored in the rxbd)
* equals the TCP checksum value in the packet
*/
s32_t is_checksum_valid(XMcdma_Bd *rxbd, struct pbuf *p)
{
struct ethip_hdr *ehdr = p->payload;
u8_t proto = IPH_PROTO(&ehdr->ip);
/* check if it is a TCP packet */
if (htons(ehdr->eth.type) == ETHTYPE_IP && proto == IP_PROTO_TCP) {
u32_t iphdr_len;
u16_t csum_in_rxbd, pseudo_csum, iphdr_csum, padding_csum;
u16_t tcp_payload_offset;
u32_t computed_csum;
u16_t padding_len, tcp_payload_len, packet_len;
u16_t csum;
/* determine length of IP header */
iphdr_len = (IPH_HL(&ehdr->ip) * 4);
tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
tcp_payload_len = htons(IPH_LEN(&ehdr->ip)) -
IPH_HL(&ehdr->ip) * 4;
packet_len = extract_packet_len(rxbd);
padding_len = packet_len - tcp_payload_offset - tcp_payload_len;
csum_in_rxbd = extract_csum(rxbd);
pseudo_csum = htons(ip_chksum_pseudo(NULL, proto,
tcp_payload_len,
(ip_addr_t *)&ehdr->ip.src,
(ip_addr_t *)&ehdr->ip.dest));
/* xps_ll_temac computes the checksum of the packet starting
* at byte XAE_HDR_SIZE we need to subtract the values of
* the ethernet & IP headers
*/
iphdr_csum = inet_chksum(p->payload + XAE_HDR_SIZE, iphdr_len);
/* compute csum of padding bytes, if any */
padding_csum = inet_chksum(p->payload + p->tot_len -
padding_len, padding_len);
/* get the h/w checksum value */
computed_csum = (u32_t)csum_in_rxbd;
/* remove the effect of csumming the iphdr */
computed_csum = csum_sub(computed_csum, ~iphdr_csum);
/* add in the pseudo csum */
computed_csum = csum_sub(computed_csum, ~pseudo_csum);
/* remove any padding effect */
computed_csum = csum_sub(computed_csum, ~padding_csum);
/* normalize computed csum */
while (computed_csum >> 16) {
computed_csum = (computed_csum & 0xffff) +
(computed_csum >> 16);
}
/* convert to 16 bits and take 1's complement */
csum = (u16_t)computed_csum;
csum = ~csum;
/* chksum is valid if: computed csum over the packet is 0 */
return !csum;
} else {
/* just say yes to all other packets */
/* the upper layers in the stack will compute and
* verify the checksum */
return 1;
}
}
#define XMcdma_BdMemCalc(Alignment, NumBd) \
(int)((sizeof(XMcdma_Bd)+((Alignment)-1)) & ~((Alignment)-1))*(NumBd)
static inline void *alloc_bdspace(int n_desc, u32 alignment)
{
int space = XMcdma_BdMemCalc(alignment, n_desc);
void *unaligned_mem = bd_mem_ptr;
void *aligned_mem =
(void *)(((UINTPTR)(unaligned_mem + alignment - 1)) & ~(alignment - 1));
if (aligned_mem + space > (void *)(bd_space + BD_SIZE)) {
LWIP_DEBUGF(NETIF_DEBUG, ("Unable to allocate BD space\r\n"));
return NULL;
}
bd_mem_ptr = aligned_mem + space;
return aligned_mem;
}
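/* Design note: unlike the mem_malloc()-based variant in the DMA port, this
 * is a simple bump allocator over the static bd_space[] block; allocations
 * are never freed, which is acceptable because BD rings live for the
 * lifetime of the interface. */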
static void axi_mcdma_send_error_handler(void *CallBackRef, u32 ChanId, u32 Mask)
{
u32 timeOut;
XMcdma *McDmaInstPtr = (XMcdma *)((void *)CallBackRef);
#if !NO_SYS
xInsideISR++;
#endif
LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma error interrupt is asserted, Chan_id = "
"%d, Mask = %d\r\n", __FUNCTION__, ChanId, Mask));
XMcDma_Reset(McDmaInstPtr);
timeOut = RESET_TIMEOUT_COUNT;
while (timeOut) {
if (XMcdma_ResetIsDone(McDmaInstPtr))
break;
timeOut -= 1;
}
if (!timeOut) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma reset timed out\r\n", __func__));
}
#if !NO_SYS
xInsideISR--;
#endif
}
static void axi_mcdma_send_handler(void *CallBackRef, u32 ChanId)
{
XMcdma *McDmaInstPtr = (XMcdma *)((void *)CallBackRef);
XMcdma_ChanCtrl *Tx_Chan = XMcdma_GetMcdmaTxChan(McDmaInstPtr, ChanId);
#if !NO_SYS
xInsideISR++;
#endif
process_sent_bds(Tx_Chan);
#if !NO_SYS
xInsideISR--;
#endif
}
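/*
 * setup_rx_bds: (re)arm up to n_bds receive descriptors. Each BD gets a
 * freshly allocated pbuf whose payload cache lines are invalidated (or
 * flushed) so DMA-written data is not shadowed by stale cache contents;
 * the pbuf pointer is stashed in the BD's software ID field for retrieval
 * in the receive handler.
 */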
static void setup_rx_bds(XMcdma_ChanCtrl *Rx_Chan, u32_t n_bds)
{
XMcdma_Bd *rxbd;
u32_t i = 0;
XStatus status;
struct pbuf *p;
u32 bdsts;
#ifdef USE_JUMBO_FRAMES
u32 max_frame_size = XAE_MAX_JUMBO_FRAME_SIZE + IEEE_1588_PAD_SIZE;
#else
u32 max_frame_size = XAE_MAX_FRAME_SIZE + IEEE_1588_PAD_SIZE;
#endif
for (i = 0; i < n_bds; i++) {
p = pbuf_alloc(PBUF_RAW, max_frame_size, PBUF_POOL);
if (!p) {
LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in recv_handler\r\n"));
return;
}
rxbd = (XMcdma_Bd *)XMcdma_GetChanCurBd(Rx_Chan);
status = XMcDma_ChanSubmit(Rx_Chan, (UINTPTR)p->payload,
p->len);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
pbuf_free(p);
return;
}
/* Clear everything but the COMPLETE bit, which is cleared when
* committed to hardware.
*/
bdsts = XMcDma_BdGetSts(rxbd);
bdsts &= XMCDMA_BD_STS_COMPLETE_MASK;
XMcdma_BdWrite(rxbd, XMCDMA_BD_STS_OFFSET, bdsts);
XMcDma_BdSetCtrl(rxbd, 0);
XMcdma_BdSetSwId(rxbd, p);
#if defined(__aarch64__)
Xil_DCacheInvalidateRange((UINTPTR)p->payload,
(UINTPTR)max_frame_size);
#else
Xil_DCacheFlushRange((UINTPTR)p->payload,
(UINTPTR)max_frame_size);
#endif
}
#if !defined (__MICROBLAZE__) && !defined (__riscv)
dsb();
#endif
if (n_bds) {
/* Enqueue to HW */
status = XMcDma_ChanToHw(Rx_Chan);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware\n\r"));
}
}
}
static void axi_mcdma_recv_error_handler(void *CallBackRef, u32 ChanId)
{
u32 timeOut;
XMcdma_ChanCtrl *Rx_Chan;
struct xemac_s *xemac = (struct xemac_s *)(CallBackRef);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
XMcdma *McDmaInstPtr = &xaxiemacif->aximcdma;
#if !NO_SYS
xInsideISR++;
#endif
LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma error interrupt is asserted\r\n",
__FUNCTION__));
Rx_Chan = XMcdma_GetMcdmaRxChan(McDmaInstPtr, ChanId);
setup_rx_bds(Rx_Chan, Rx_Chan->BdCnt);
XMcDma_Reset(McDmaInstPtr);
timeOut = RESET_TIMEOUT_COUNT;
while (timeOut) {
if (XMcdma_ResetIsDone(McDmaInstPtr))
break;
timeOut -= 1;
}
if (!timeOut) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma reset timed out\r\n", __func__));
}
XMcDma_ChanToHw(Rx_Chan);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
static void axi_mcdma_recv_handler(void *CallBackRef, u32 ChanId)
{
struct pbuf *p;
u32 i, rx_bytes, ProcessedBdCnt;
XMcdma_Bd *rxbd, *rxbdset;
struct xemac_s *xemac = (struct xemac_s *)(CallBackRef);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
XMcdma *McDmaInstPtr = &xaxiemacif->aximcdma;
XMcdma_ChanCtrl *Rx_Chan;
#if !NO_SYS
xInsideISR++;
#endif
Rx_Chan = XMcdma_GetMcdmaRxChan(McDmaInstPtr, ChanId);
ProcessedBdCnt = XMcdma_BdChainFromHW(Rx_Chan, XMCDMA_ALL_BDS, &rxbdset);
for (i = 0, rxbd = rxbdset; i < ProcessedBdCnt; i++) {
p = (struct pbuf *)(UINTPTR)XMcdma_BdGetSwId(rxbd);
/* Adjust the buffer size to actual number of bytes received.*/
rx_bytes = extract_packet_len(rxbd);
#ifndef __aarch64__
Xil_DCacheInvalidateRange((UINTPTR)p->payload,
(UINTPTR)rx_bytes);
#endif
pbuf_realloc(p, rx_bytes);
#if LWIP_PARTIAL_CSUM_OFFLOAD_RX==1
/* Verify for partial checksum offload case */
if (!is_checksum_valid(rxbd, p)) {
LWIP_DEBUGF(NETIF_DEBUG, ("Incorrect csum as calculated by the hw\r\n"));
}
#endif
/* store it in the receive queue,
* where it'll be processed by a different handler
*/
if (pq_enqueue(xaxiemacif->recv_q, (void*)p) < 0) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
pbuf_free(p);
}
rxbd = (XMcdma_Bd *)XMcdma_BdChainNextBd(Rx_Chan, rxbd);
}
/* free up the BD's */
XMcdma_BdChainFree(Rx_Chan, ProcessedBdCnt, rxbdset);
/* return all the processed bd's back to the stack */
setup_rx_bds(Rx_Chan, Rx_Chan->BdCnt);
#if !NO_SYS
sys_sem_signal(&xemac->sem_rx_data_available);
xInsideISR--;
#endif
}
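/*
 * xaxiemac_is_tx_space_available: returns the free BD count of the first
 * TX channel that has room, or 0 when every channel is full.
 */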
s32_t xaxiemac_is_tx_space_available(xaxiemacif_s *xaxiemacif)
{
XMcdma_ChanCtrl *Tx_Chan;
u8_t ChanId;
for (ChanId = 1;
ChanId <= xaxiemacif->axi_ethernet.Config.AxiMcDmaChan_Cnt;
ChanId++) {
Tx_Chan = XMcdma_GetMcdmaTxChan(&xaxiemacif->aximcdma, ChanId);
if (Tx_Chan->BdCnt) {
return Tx_Chan->BdCnt;
}
}
return 0;
}
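/*
 * process_sent_bds: reclaim completed TX BDs and drop the pbuf reference
 * taken in axi_mcdma_sgsend() for each of them.
 */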
s32_t process_sent_bds(XMcdma_ChanCtrl *Tx_Chan)
{
int ProcessedBdCnt, i;
XStatus status;
XMcdma_Bd *txbdset, *txbd;
ProcessedBdCnt = XMcdma_BdChainFromHW(Tx_Chan, XMCDMA_ALL_BDS,
&txbdset);
if (ProcessedBdCnt == 0) {
return XST_FAILURE;
}
/* free the pbuf associated with each BD */
for (i = 0, txbd = txbdset; i < ProcessedBdCnt; i++) {
struct pbuf *p = (struct pbuf *)(UINTPTR)XMcdma_BdGetSwId(txbd);
pbuf_free(p);
txbd = (XMcdma_Bd *)XMcdma_BdChainNextBd(Tx_Chan, txbd);
}
/* free the processed BD's */
status = XMcdma_BdChainFree(Tx_Chan, ProcessedBdCnt, txbdset);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error freeing up TxBDs"));
return XST_FAILURE;
}
return XST_SUCCESS;
}
#if LWIP_PARTIAL_CSUM_OFFLOAD_TX==1
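/*
 * Partial checksum offload: the first BD of a packet is programmed with
 * the byte offset at which the hardware starts summing, the offset at
 * which the 16-bit result is inserted, and an initial seed. Seeding with
 * the complemented pseudo-header checksum makes the hardware produce the
 * final transport-layer checksum.
 */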
static void update_partial_cksum_offload(XMcdma_Bd *txbdset, struct pbuf *p)
{
if (p->len > sizeof(struct ethip_hdr)) {
struct ethip_hdr *ehdr = p->payload;
u8_t proto = IPH_PROTO(&ehdr->ip);
/* check if it is a TCP packet */
if (htons(ehdr->eth.type) == ETHTYPE_IP && proto ==
IP_PROTO_TCP) {
u32_t iphdr_len, csum_insert_offset;
u16_t tcp_len; /* TCP header length + data length in bytes */
u16_t csum_init = 0;
u16_t tcp_payload_offset;
/* determine length of IP header */
iphdr_len = (IPH_HL(&ehdr->ip) * 4);
tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
tcp_len = p->tot_len - tcp_payload_offset;
/* insert checksum at offset 16 for TCP, 6 for UDP */
if (proto == IP_PROTO_TCP)
csum_insert_offset = tcp_payload_offset + 16;
else if (proto == IP_PROTO_UDP)
csum_insert_offset = tcp_payload_offset + 6;
else
csum_insert_offset = 0;
/* compute pseudo header checksum value */
csum_init = ip_chksum_pseudo(NULL, proto, tcp_len,
(ip_addr_t *)&ehdr->ip.src,
(ip_addr_t *)&ehdr->ip.dest);
/* init buffer descriptor */
bd_csum_set(txbdset, tcp_payload_offset,
csum_insert_offset, htons(~csum_init));
}
}
}
#endif
XStatus axi_mcdma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p)
{
struct pbuf *q;
u32_t n_pbufs = 0;
XMcdma_Bd *txbdset, *txbd, *last_txbd = NULL;
XMcdma_ChanCtrl *Tx_Chan;
XStatus status;
static u8_t ChanId = 1;
u8_t next_ChanId = ChanId;
/* first count the number of pbufs */
for (q = p; q != NULL; q = q->next)
n_pbufs++;
/* Transfer packets to TX DMA Channels in round-robin manner */
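/* next_ChanId remembers the starting channel: if the scan wraps back
 * around to it and the current channel still lacks enough free BDs,
 * every channel is full and the packet is rejected. */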
do {
Tx_Chan = XMcdma_GetMcdmaTxChan(&xaxiemacif->aximcdma, ChanId);
if (++ChanId > xaxiemacif->axi_ethernet.Config.AxiMcDmaChan_Cnt)
ChanId = 1;
if ((next_ChanId == ChanId) && (n_pbufs > Tx_Chan->BdCnt)) {
LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error, not enough BD space in All Chans\r\n"));
return ERR_IF;
}
} while (n_pbufs > Tx_Chan->BdCnt);
txbdset = (XMcdma_Bd *)XMcdma_GetChanCurBd(Tx_Chan);
for (q = p, txbd = txbdset; q != NULL; q = q->next) {
/* Send the data from the pbuf to the interface, one pbuf at a
* time. The size of the data in each pbuf is kept in the ->len
* variable.
*/
XMcDma_BdSetCtrl(txbd, 0);
XMcdma_BdSetSwId(txbd, (void *)q);
Xil_DCacheFlushRange((UINTPTR)q->payload, q->len);
status = XMcDma_ChanSubmit(Tx_Chan, (UINTPTR)q->payload,
q->len);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("ChanSubmit failed\n\r"));
return XST_FAILURE;
}
pbuf_ref(q);
last_txbd = txbd;
txbd = (XMcdma_Bd *)XMcdma_BdChainNextBd(Tx_Chan, txbd);
}
if (n_pbufs == 1) {
XMcDma_BdSetCtrl(txbdset, XMCDMA_BD_CTRL_SOF_MASK
| XMCDMA_BD_CTRL_EOF_MASK);
} else {
/* in the first packet, set the SOP */
XMcDma_BdSetCtrl(txbdset, XMCDMA_BD_CTRL_SOF_MASK);
/* in the last packet, set the EOP */
XMcDma_BdSetCtrl(last_txbd, XMCDMA_BD_CTRL_EOF_MASK);
}
#if LWIP_FULL_CSUM_OFFLOAD_TX==1
bd_fullcsum_disable(txbdset);
if (p->len > sizeof(struct ethip_hdr)) {
bd_fullcsum_enable(txbdset);
}
#endif
#if LWIP_PARTIAL_CSUM_OFFLOAD_TX==1
bd_csum_disable(txbdset);
update_partial_cksum_offload(txbdset, p);
#endif
DATA_SYNC;
/* enq to h/w */
return XMcDma_ChanToHw(Tx_Chan);
}
void axi_mcdma_register_handlers(struct xemac_s *xemac, u8 ChanId)
{
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
XMcdma *McDmaInstPtr = &xaxiemacif->aximcdma;
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.TemacIntr,
(Xil_InterruptHandler)xaxiemac_error_handler,
&xaxiemacif->axi_ethernet);
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.AxiMcDmaRxIntr[ChanId - 1],
(Xil_InterruptHandler)XMcdma_IntrHandler,
McDmaInstPtr);
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.AxiMcDmaTxIntr[ChanId - 1],
(Xil_InterruptHandler)XMcdma_TxIntrHandler,
McDmaInstPtr);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr,
AXIETH_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaTxIntr[ChanId - 1],
AXIDMA_TX_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaRxIntr[ChanId - 1],
AXIDMA_RX_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaTxIntr[ChanId - 1]);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaRxIntr[ChanId - 1]);
#endif /* XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ */
}
XStatus axi_mcdma_setup_rx_chan(struct xemac_s *xemac, u32_t ChanId)
{
XMcdma_ChanCtrl *Rx_Chan;
XStatus status;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
/* RX chan configurations */
Rx_Chan = XMcdma_GetMcdmaRxChan(&xaxiemacif->aximcdma, ChanId);
/* Disable all interrupts */
XMcdma_IntrDisable(Rx_Chan, XMCDMA_IRQ_ALL_MASK);
status = XMcDma_ChanBdCreate(Rx_Chan, (UINTPTR) xaxiemacif->rx_bdspace,
XLWIP_CONFIG_N_RX_DESC);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Rx bd create failed with %d\r\n", status));
return XST_FAILURE;
}
xaxiemacif->rx_bdspace += (XLWIP_CONFIG_N_RX_DESC * sizeof(XMcdma_Bd));
/* Setup Interrupt System and register callbacks */
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_HANDLER_DONE,
(void *)axi_mcdma_recv_handler, xemac);
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_HANDLER_ERROR,
(void *)axi_mcdma_recv_error_handler, xemac);
status = XMcdma_SetChanCoalesceDelay(Rx_Chan,
XLWIP_CONFIG_N_RX_COALESCE,
XMCDMA_COALESCEDELAY);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
return ERR_IF;
}
setup_rx_bds(Rx_Chan, XLWIP_CONFIG_N_RX_DESC);
/* enable DMA interrupts */
XMcdma_IntrEnable(Rx_Chan, XMCDMA_IRQ_ALL_MASK);
return XST_SUCCESS;
}
XStatus axi_mcdma_setup_tx_chan(struct xemac_s *xemac, u8 ChanId)
{
XStatus status;
XMcdma_ChanCtrl *Tx_Chan;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
/* TX chan configurations */
Tx_Chan = XMcdma_GetMcdmaTxChan(&xaxiemacif->aximcdma, ChanId);
XMcdma_IntrDisable(Tx_Chan, XMCDMA_IRQ_ALL_MASK);
status = XMcDma_ChanBdCreate(Tx_Chan, (UINTPTR) xaxiemacif->tx_bdspace,
XLWIP_CONFIG_N_TX_DESC);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("TX bd create failed with %d\r\n", status));
return XST_FAILURE;
}
xaxiemacif->tx_bdspace += (XLWIP_CONFIG_N_TX_DESC * sizeof(XMcdma_Bd));
/* Setup Interrupt System and register callbacks */
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_TX_HANDLER_DONE,
(void *)axi_mcdma_send_handler, &xaxiemacif->aximcdma);
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_TX_HANDLER_ERROR,
(void *)axi_mcdma_send_error_handler,
&xaxiemacif->aximcdma);
status = XMcdma_SetChanCoalesceDelay(Tx_Chan,
XLWIP_CONFIG_N_TX_COALESCE,
XMCDMA_COALESCEDELAY);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
return ERR_IF;
}
XMcdma_IntrEnable(Tx_Chan, XMCDMA_IRQ_ALL_MASK);
return XST_SUCCESS;
}
XStatus init_axi_mcdma(struct xemac_s *xemac)
{
XMcdma_Config *dmaconfig;
XStatus status;
u32_t ChanId;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
UINTPTR baseaddr;
/*
* Disable L1 prefetch if the processor type is Cortex A53. It is
* observed that the L1 prefetching for ARMv8 can cause issues while
* dealing with cache memory on Rx path. On Rx path, the lwIP adapter
* does a clean and invalidation of buffers (pbuf payload) before
* allocating them to Rx BDs. However, there are chances that the
* the same cache line may get prefetched by the time Rx data is
* DMAed to the same buffer. In such cases, CPU fetches stale data from
* cache memory instead of getting them from memory. To avoid such
* scenarios, L1 prefetch is disabled for ARMv8. That can cause a
* performance degradation in the range of 3-5%, but in tests this
* degradation is generally too small to be noticeable.
*/
#if defined __aarch64__
Xil_ConfigureL1Prefetch(0);
#endif
xaxiemacif->rx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_RX_DESC *
(XMCDMA_MAX_CHAN_PER_DEVICE / 2),
XMCDMA_BD_MINIMUM_ALIGNMENT);
if (!xaxiemacif->rx_bdspace) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: Unable to allocate memory for "
"RX buffer descriptors", __FILE__, __LINE__));
return ERR_IF;
}
xaxiemacif->tx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_TX_DESC *
(XMCDMA_MAX_CHAN_PER_DEVICE / 2),
XMCDMA_BD_MINIMUM_ALIGNMENT);
if (!xaxiemacif->tx_bdspace) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: Unable to allocate memory for "
"TX buffer descriptors", __FILE__, __LINE__));
return ERR_IF;
}
/* Mark the BD Region as uncacheable */
#if defined(__aarch64__)
Xil_SetTlbAttributes((UINTPTR)bd_space,
NORM_NONCACHE | INNER_SHAREABLE);
#elif defined (ARMR5)
Xil_SetTlbAttributes((INTPTR)bd_space,
DEVICE_SHARED | PRIV_RW_USER_RW);
#else
Xil_SetTlbAttributes((INTPTR)bd_space, DEVICE_MEMORY);
#endif
LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n",
xaxiemacif->rx_bdspace));
LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n",
xaxiemacif->tx_bdspace));
/* Initialize MCDMA */
baseaddr = xaxiemacif->axi_ethernet.Config.AxiDevBaseAddress;
dmaconfig = XMcdma_LookupConfigBaseAddr(baseaddr);
if (!dmaconfig) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: Lookup Config failed\r\n", __FILE__,
__LINE__));
return XST_FAILURE;
}
status = XMcDma_CfgInitialize(&xaxiemacif->aximcdma, dmaconfig);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: MCDMA config initialization failed\r\n", __FILE__, __LINE__));
return XST_FAILURE;
}
/* Setup Rx/Tx chan and Interrupts */
for (ChanId = 1;
ChanId <= xaxiemacif->axi_ethernet.Config.AxiMcDmaChan_Cnt;
ChanId++) {
status = axi_mcdma_setup_rx_chan(xemac, ChanId);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: MCDMA Rx chan setup failed\r\n", __FILE__, __LINE__));
return XST_FAILURE;
}
status = axi_mcdma_setup_tx_chan(xemac, ChanId);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: MCDMA Tx chan setup failed\r\n", __FILE__, __LINE__));
return XST_FAILURE;
}
axi_mcdma_register_handlers(xemac, ChanId);
}
return XST_SUCCESS;
}

View File

@ -0,0 +1,834 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "netif/xaxiemacif.h"
#include "lwipopts.h"
#include "sleep.h"
#include "xemac_ieee_reg.h"
#define PHY_R0_ISOLATE 0x0400
#define PHY_DETECT_REG 1
#define PHY_IDENTIFIER_1_REG 2
#define PHY_IDENTIFIER_2_REG 3
#define PHY_DETECT_MASK 0x1808
#define PHY_MARVELL_IDENTIFIER 0x0141
#define PHY_TI_IDENTIFIER 0x2000
/* Marvell PHY flags */
#define MARVEL_PHY_IDENTIFIER 0x141
#define MARVEL_PHY_MODEL_NUM_MASK 0x3F0
#define MARVEL_PHY_88E1111_MODEL 0xC0
#define MARVEL_PHY_88E1116R_MODEL 0x240
#define PHY_88E1111_RGMII_RX_CLOCK_DELAYED_MASK 0x0080
/* TI PHY Flags */
#define TI_PHY_DETECT_MASK 0x796D
#define TI_PHY_IDENTIFIER 0x2000
#define TI_PHY_DP83867_MODEL 0xA231
#define DP83867_RGMII_CLOCK_DELAY_CTRL_MASK 0x0003
#define DP83867_RGMII_TX_CLOCK_DELAY_MASK 0x0030
#define DP83867_RGMII_RX_CLOCK_DELAY_MASK 0x0003
/* TI DP83867 PHY Registers */
#define DP83867_R32_RGMIICTL1 0x32
#define DP83867_R86_RGMIIDCTL 0x86
#define TI_PHY_REGCR 0xD
#define TI_PHY_ADDDR 0xE
#define TI_PHY_PHYCTRL 0x10
#define TI_PHY_CFGR2 0x14
#define TI_PHY_SGMIITYPE 0xD3
#define TI_PHY_CFGR2_SGMII_AUTONEG_EN 0x0080
#define TI_PHY_SGMIICLK_EN 0x4000
#define TI_PHY_REGCR_DEVAD_EN 0x001F
#define TI_PHY_REGCR_DEVAD_DATAEN 0x4000
#define TI_PHY_CFGR2_MASK 0x003F
#define TI_PHY_REGCFG4 0x31
#define TI_PHY_REGCR_DATA 0x401F
#define TI_PHY_CFG4RESVDBIT7 0x80
#define TI_PHY_CFG4RESVDBIT8 0x100
#define TI_PHY_CFG4_AUTONEG_TIMER 0x60
#define TI_PHY_CFG2_SPEEDOPT_10EN 0x0040
#define TI_PHY_CFG2_SGMII_AUTONEGEN 0x0080
#define TI_PHY_CFG2_SPEEDOPT_ENH 0x0100
#define TI_PHY_CFG2_SPEEDOPT_CNT 0x0800
#define TI_PHY_CFG2_SPEEDOPT_INTLOW 0x2000
#define TI_PHY_CR_SGMII_EN 0x0800
/* Loop counters to check for reset done */
#define RESET_TIMEOUT 0xFFFF
#define AUTO_NEG_TIMEOUT 0x00FFFFFF
#define IEEE_CTRL_RESET 0x9140
#define IEEE_CTRL_ISOLATE_DISABLE 0xFBFF
#define PHY_XILINX_PCS_PMA_ID1 0x0174
#define PHY_XILINX_PCS_PMA_ID2 0x0C00
#ifdef SDT
#define XPAR_AXIETHERNET_0_PHYADDR XPAR_XAXIETHERNET_0_PHYADDR
#define XPAR_AXIETHERNET_0_BASEADDR XPAR_XAXIETHERNET_0_BASEADDR
#endif
extern u32_t phyaddrforemac;
static void __attribute__ ((noinline)) AxiEthernetUtilPhyDelay(unsigned int Seconds);
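/*
 * detect_phy: scan MDIO addresses 31 down to 1 and return the first one
 * that answers with a plausible status register or a Xilinx PCS/PMA ID;
 * falls back to address 0 if nothing responds.
 */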
static int detect_phy(XAxiEthernet *xaxiemacp)
{
u16 phy_reg;
u16 phy_id;
u32 phy_addr;
for (phy_addr = 31; phy_addr > 0; phy_addr--) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_DETECT_REG,
&phy_reg);
if ((phy_reg != 0xFFFF) &&
((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
/* Found a valid PHY address */
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: PHY detected at address %d.\r\n", phy_addr));
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: PHY detected.\r\n"));
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG,
&phy_reg);
if ((phy_reg != PHY_MARVELL_IDENTIFIER) &&
(phy_reg != TI_PHY_IDENTIFIER)){
xil_printf("WARNING: Not a Marvell or TI Ethernet PHY. Please verify the initialization sequence\r\n");
}
phyaddrforemac = phy_addr;
return phy_addr;
}
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG,
&phy_id);
if (phy_id == PHY_XILINX_PCS_PMA_ID1) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG,
&phy_id);
if (phy_id == PHY_XILINX_PCS_PMA_ID2) {
/* Found a valid PHY address */
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: PHY detected at address %d.\r\n",
phy_addr));
phyaddrforemac = phy_addr;
return phy_addr;
}
}
}
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: No PHY detected. Assuming a PHY at address 0\r\n"));
/* default to zero */
return 0;
}
static int isphy_pcspma(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 phy_id;
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG,
&phy_id);
if (phy_id == PHY_XILINX_PCS_PMA_ID1) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG,
&phy_id);
if (phy_id == PHY_XILINX_PCS_PMA_ID2) {
return 1;
}
}
return 0;
}
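/*
 * MMD (clause 45 style) indirect access through the clause 22 register
 * pair 13/14: write the device address to the MMD access control
 * register, the target register number to the address/data register,
 * then switch the control register to data mode and read or write the
 * value.
 */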
void XAxiEthernet_PhyReadExtended(XAxiEthernet *InstancePtr, u32 PhyAddress,
u32 RegisterNum, u16 *PhyDataPtr)
{
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_DEVAD_MASK);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, RegisterNum);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_NOPIDEVAD_MASK);
XAxiEthernet_PhyRead(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, PhyDataPtr);
}
void XAxiEthernet_PhyWriteExtended(XAxiEthernet *InstancePtr, u32 PhyAddress,
u32 RegisterNum, u16 PhyDataPtr)
{
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_DEVAD_MASK);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, RegisterNum);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_NOPIDEVAD_MASK);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, PhyDataPtr);
}
unsigned int get_phy_negotiated_speed (XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 control;
u16 status;
u16 partner_capabilities;
u16 partner_capabilities_1000;
u16 phylinkspeed;
u16 temp;
phy_addr = detect_phy(xaxiemacp);
xil_printf("Start PHY autonegotiation \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
if (isphy_pcspma(xaxiemacp, phy_addr)) {
control &= IEEE_CTRL_ISOLATE_DISABLE;
}
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
control);
if (isphy_pcspma(xaxiemacp, phy_addr)) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
xil_printf("Waiting for PHY to complete autonegotiation \r\n");
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
AxiEthernetUtilPhyDelay(1);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET,
&status);
}
xil_printf("Autonegotiation complete \r\n");
if (xaxiemacp->Config.Speed == XAE_SPEED_2500_MBPS)
return XAE_SPEED_2500_MBPS;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) == XAE_PHY_TYPE_1000BASE_X) {
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) == XAE_PHY_TYPE_1000BASE_X) {
#endif
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 1);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
if ((temp & 0x0020) == 0x0020) {
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
return 1000;
}
else {
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
xil_printf("Link error, temp = %x\r\n", temp);
return 0;
}
#ifndef SDT
} else if(XAxiEthernet_GetPhysicalInterface(xaxiemacp) == XAE_PHY_TYPE_SGMII) {
#else
} else if(XAxiEthernet_Get_Phy_Interface(xaxiemacp) == XAE_PHY_TYPE_SGMII) {
#endif
xil_printf("Waiting for Link to be up; Polling for SGMII core Reg \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
while(!(temp & 0x8000)) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
}
if((temp & 0x0C00) == 0x0800) {
return 1000;
}
else if((temp & 0x0C00) == 0x0400) {
return 100;
}
else if((temp & 0x0C00) == 0x0000) {
return 10;
} else {
xil_printf("get_IEEE_phy_speed(): Invalid speed bit value, Defaulting to Speed = 10 Mbps\r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &temp);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, 0x0100);
return 10;
}
}
}
/* Read the PHY control and status registers. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET,
&status);
if ((control & IEEE_CTRL_AUTONEGOTIATE_ENABLE) && (status &
IEEE_STAT_AUTONEGOTIATE_CAPABLE)) {
xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_STATUS_REG_OFFSET,
&status);
}
xil_printf("autonegotiation complete \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET,
&partner_capabilities);
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_3_REG_OFFSET,
&partner_capabilities_1000);
if (partner_capabilities_1000 &
IEEE_AN3_ABILITY_MASK_1GBPS)
return 1000;
}
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_100MBPS)
return 100;
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_10MBPS)
return 10;
xil_printf("%s: unknown PHY link speed, setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__);
return 10;
} else {
/* Update TEMAC speed accordingly */
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
/* Get commanded link speed */
phylinkspeed = control &
IEEE_CTRL_1GBPS_LINKSPEED_MASK;
switch (phylinkspeed) {
case (IEEE_CTRL_LINKSPEED_1000M):
return 1000;
case (IEEE_CTRL_LINKSPEED_100M):
return 100;
case (IEEE_CTRL_LINKSPEED_10M):
return 10;
default:
xil_printf("%s: unknown PHY link speed (%d), setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__, phylinkspeed);
return 10;
}
} else {
return (control & IEEE_CTRL_LINKSPEED_MASK) ? 100 : 10;
}
}
}
unsigned int get_phy_speed_TI_DP83867(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 phy_val;
u16 control;
xil_printf("Start PHY autonegotiation \r\n");
/* Changing the PHY RX and TX DELAY settings. */
XAxiEthernet_PhyReadExtended(xaxiemacp, phy_addr, DP83867_R32_RGMIICTL1, &phy_val);
phy_val |= DP83867_RGMII_CLOCK_DELAY_CTRL_MASK;
XAxiEthernet_PhyWriteExtended(xaxiemacp, phy_addr, DP83867_R32_RGMIICTL1, phy_val);
XAxiEthernet_PhyReadExtended(xaxiemacp, phy_addr, DP83867_R86_RGMIIDCTL, &phy_val);
phy_val &= 0xFF00;
phy_val |= DP83867_RGMII_TX_CLOCK_DELAY_MASK;
phy_val |= DP83867_RGMII_RX_CLOCK_DELAY_MASK;
XAxiEthernet_PhyWriteExtended(xaxiemacp, phy_addr, DP83867_R86_RGMIIDCTL, phy_val);
/* Set advertised speeds for 10/100/1000Mbps modes. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &control);
control |= IEEE_ASYMMETRIC_PAUSE_MASK;
control |= IEEE_PAUSE_MASK;
control |= ADVERTISE_100;
control |= ADVERTISE_10;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, &control);
control |= ADVERTISE_1000;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, control);
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
unsigned int get_phy_speed_TI_DP83867_SGMII(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 control;
u16 temp;
u16 phyregtemp;
xil_printf("Start TI PHY autonegotiation \r\n");
/* Enable SGMII Clock */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIITYPE);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN | TI_PHY_REGCR_DEVAD_DATAEN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIICLK_EN);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
control |= (IEEE_CTRL_AUTONEGOTIATE_ENABLE | IEEE_CTRL_LINKSPEED_1000M |
IEEE_CTRL_FULL_DUPLEX);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, TI_PHY_CFGR2, &control);
control &= TI_PHY_CFGR2_MASK;
control |= (TI_PHY_CFG2_SPEEDOPT_10EN |
TI_PHY_CFG2_SGMII_AUTONEGEN |
TI_PHY_CFG2_SPEEDOPT_ENH |
TI_PHY_CFG2_SPEEDOPT_CNT |
TI_PHY_CFG2_SPEEDOPT_INTLOW);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_CFGR2, control);
/* Disable RGMII */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
DP83867_R32_RGMIICTL1);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN | TI_PHY_REGCR_DEVAD_DATAEN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, 0);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_PHYCTRL,
TI_PHY_CR_SGMII_EN);
xil_printf("Waiting for Link to be up \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
while(!(temp & 0x4000)) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
}
xil_printf("Auto negotiation completed for TI PHY\n\r");
/* SW workaround for unstable link when RX_CTRL is not STRAP MODE 3 or 4 */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, TI_PHY_REGCFG4);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DATA);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, TI_PHY_ADDDR, (u16_t *)&phyregtemp);
phyregtemp &= ~(TI_PHY_CFG4RESVDBIT7);
phyregtemp |= TI_PHY_CFG4RESVDBIT8;
phyregtemp &= ~(TI_PHY_CFG4_AUTONEG_TIMER);
phyregtemp |= TI_PHY_CFG4_AUTONEG_TIMER;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, TI_PHY_REGCFG4);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DATA);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, phyregtemp);
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
unsigned int get_phy_speed_88E1116R(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 phy_val;
u16 control;
u16 status;
u16 partner_capabilities;
xil_printf("Start PHY autonegotiation \r\n");
XAxiEthernet_PhyWrite(xaxiemacp,phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 2);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, &control);
control |= IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, control);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &control);
control |= IEEE_ASYMMETRIC_PAUSE_MASK;
control |= IEEE_PAUSE_MASK;
control |= ADVERTISE_100;
control |= ADVERTISE_10;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
&control);
control |= ADVERTISE_1000;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
control);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_COPPER_SPECIFIC_CONTROL_REG,
&control);
control |= (7 << 12); /* max number of gigabit atphy_valts */
control |= (1 << 11); /* enable downshift */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_COPPER_SPECIFIC_CONTROL_REG,
control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
control |= IEEE_CTRL_RESET_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
while (1) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
if (control & IEEE_CTRL_RESET_MASK)
continue;
else
break;
}
xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
AxiEthernetUtilPhyDelay(1);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_COPPER_SPECIFIC_STATUS_REG_2,
&phy_val);
if (phy_val & IEEE_AUTONEG_ERROR_MASK) {
xil_printf("Auto negotiation error \r\n");
}
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET,
&status);
}
xil_printf("autonegotiation complete \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_SPECIFIC_STATUS_REG,
&partner_capabilities);
if ( ((partner_capabilities >> 14) & 3) == 2)/* 1000Mbps */
return 1000;
else if ( ((partner_capabilities >> 14) & 3) == 1)/* 100Mbps */
return 100;
else /* 10Mbps */
return 10;
}
unsigned int get_phy_speed_88E1111 (XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 control;
int TimeOut;
u16 phy_val;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_2_0) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_EXT_PHY_SPECIFIC_CONTROL_REG, &phy_val);
phy_val |= PHY_88E1111_RGMII_RX_CLOCK_DELAYED_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_EXT_PHY_SPECIFIC_CONTROL_REG, phy_val);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_RESET_MASK);
TimeOut = RESET_TIMEOUT;
while (TimeOut) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_CONTROL_REG_OFFSET, &control);
if (!(control & IEEE_CTRL_RESET_MASK))
break;
TimeOut -= 1;
}
if (!TimeOut) {
xil_printf("%s: Phy Reset failed\n\r", __FUNCTION__);
return 0;
}
}
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
ADVERTISE_1000);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100_AND_10);
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
unsigned get_IEEE_phy_speed(XAxiEthernet *xaxiemacp)
{
u16 phy_identifier;
u16 phy_model;
u8 phytype;
#ifdef XPAR_AXIETHERNET_0_BASEADDR
u32 phy_addr = detect_phy(xaxiemacp);
/* Get the PHY Identifier and Model number */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG, &phy_identifier);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG, &phy_model);
/* Depending upon what manufacturer PHY is connected, a different mask is
* needed to determine the specific model number of the PHY. */
if (phy_identifier == MARVEL_PHY_IDENTIFIER) {
phy_model = phy_model & MARVEL_PHY_MODEL_NUM_MASK;
if (phy_model == MARVEL_PHY_88E1116R_MODEL) {
return get_phy_speed_88E1116R(xaxiemacp, phy_addr);
} else if (phy_model == MARVEL_PHY_88E1111_MODEL) {
return get_phy_speed_88E1111(xaxiemacp, phy_addr);
}
} else if (phy_identifier == TI_PHY_IDENTIFIER) {
phy_model = phy_model & TI_PHY_DP83867_MODEL;
#ifndef SDT
phytype = XAxiEthernet_GetPhysicalInterface(xaxiemacp);
#else
phytype = XAxiEthernet_Get_Phy_Interface(xaxiemacp);
#endif
if (phy_model == TI_PHY_DP83867_MODEL && phytype == XAE_PHY_TYPE_SGMII) {
return get_phy_speed_TI_DP83867_SGMII(xaxiemacp, phy_addr);
}
if (phy_model == TI_PHY_DP83867_MODEL) {
return get_phy_speed_TI_DP83867(xaxiemacp, phy_addr);
}
}
else {
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet get_IEEE_phy_speed: Detected PHY with unknown identifier/model.\r\n"));
}
#endif
if (isphy_pcspma(xaxiemacp, phy_addr)) {
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
/* no recognized PHY: return 0 so the caller can treat the link as down */
return 0;
}
unsigned configure_IEEE_phy_speed(XAxiEthernet *xaxiemacp, unsigned speed)
{
u16 control;
u32 phy_addr = detect_phy(xaxiemacp);
u16 phy_val;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_2_0) {
/* Setting Tx and Rx Delays for RGMII mode */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0x2);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, &phy_val);
phy_val |= IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, phy_val);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0x0);
}
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
control &= ~IEEE_CTRL_LINKSPEED_1000M;
control &= ~IEEE_CTRL_LINKSPEED_100M;
control &= ~IEEE_CTRL_LINKSPEED_10M;
if (speed == 1000) {
control |= IEEE_CTRL_LINKSPEED_1000M;
}
else if (speed == 100) {
control |= IEEE_CTRL_LINKSPEED_100M;
/* Don't advertise PHY speed of 1000 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 10 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100);
}
else if (speed == 10) {
control |= IEEE_CTRL_LINKSPEED_10M;
/* Don't advertise PHY speed of 1000 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 100 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_10);
}
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_RESET_MASK);
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_SGMII) {
control &= (~PHY_R0_ISOLATE);
XAxiEthernet_PhyWrite(xaxiemacp,
XPAR_AXIETHERNET_0_PHYADDR,
IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_AUTONEGOTIATE_ENABLE);
}
{
volatile int wait;
for (wait=0; wait < 100000; wait++);
for (wait=0; wait < 100000; wait++);
}
return 0;
}
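/*
 * phy_setup_axiemac: run the interface-specific PHY bring-up and return
 * the link speed in Mbps. With CONFIG_LINKSPEED_AUTODETECT the speed is
 * negotiated; the CONFIG_LINKSPEED1000/100/10 variants force a fixed
 * speed instead.
 */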
unsigned phy_setup_axiemac (XAxiEthernet *xaxiemacp)
{
unsigned link_speed = 1000;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_1_3) {
; /* Add PHY initialization code for RGMII 1.3 */
#ifndef SDT
} else if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
} else if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_2_0) {
; /* Add PHY initialization code for RGMII 2.0 */
#ifndef SDT
} else if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
} else if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_SGMII) {
#ifdef CONFIG_LINKSPEED_AUTODETECT
u32 phy_wr_data = IEEE_CTRL_AUTONEGOTIATE_ENABLE |
IEEE_CTRL_LINKSPEED_1000M;
phy_wr_data &= (~PHY_R0_ISOLATE);
XAxiEthernet_PhyWrite(xaxiemacp,
XPAR_AXIETHERNET_0_PHYADDR,
IEEE_CONTROL_REG_OFFSET,
phy_wr_data);
#endif
#ifndef SDT
} else if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
} else if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_1000BASE_X) {
; /* Add PHY initialization code for 1000 Base-X */
}
/* set PHY <--> MAC data clock */
#ifdef CONFIG_LINKSPEED_AUTODETECT
link_speed = get_IEEE_phy_speed(xaxiemacp);
xil_printf("auto-negotiated link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED1000)
link_speed = 1000;
configure_IEEE_phy_speed(xaxiemacp, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED100)
link_speed = 100;
configure_IEEE_phy_speed(xaxiemacp, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED10)
link_speed = 10;
configure_IEEE_phy_speed(xaxiemacp, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#endif
return link_speed;
}
static void __attribute__ ((noinline)) AxiEthernetUtilPhyDelay(unsigned int Seconds)
{
#if defined (__MICROBLAZE__)
static int WarningFlag = 0;
/* If MB caches are disabled or do not exist, this delay loop could
* take minutes instead of seconds (e.g., 30x longer). Print a warning
* message for the user (once). If only MB had a built-in timer!
*/
if (((mfmsr() & 0x20) == 0) && (!WarningFlag)) {
xil_printf("Warning: MicroBlaze caches disabled; this delay loop may take minutes\r\n");
WarningFlag = 1;
}
#define ITERS_PER_SEC (XPAR_CPU_CORE_CLOCK_FREQ_HZ / 6)
__asm volatile ("\n"
"1: \n\t"
"addik r7, r0, %0 \n\t"
"2: \n\t"
"addik r7, r7, -1 \n\t"
"bneid r7, 2b \n\t"
"or r0, r0, r0 \n\t"
"bneid %1, 1b \n\t"
"addik %1, %1, -1 \n\t"
:: "i"(ITERS_PER_SEC), "d" (Seconds));
#else
sleep(Seconds);
#endif
}
void enable_sgmii_clock(XAxiEthernet *xaxiemacp)
{
u16 phy_identifier;
u16 phy_model;
u8 phytype;
XAxiEthernet_PhySetMdioDivisor(xaxiemacp, XAE_MDIO_DIV_DFT);
u32 phy_addr = detect_phy(xaxiemacp);
/* Get the PHY Identifier and Model number */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG, &phy_identifier);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG, &phy_model);
if (phy_identifier == TI_PHY_IDENTIFIER) {
phy_model = phy_model & TI_PHY_DP83867_MODEL;
#ifndef SDT
phytype = XAxiEthernet_GetPhysicalInterface(xaxiemacp);
#else
phytype = XAxiEthernet_Get_Phy_Interface(xaxiemacp);
#endif
if (phy_model == TI_PHY_DP83867_MODEL && phytype == XAE_PHY_TYPE_SGMII) {
/* Enable SGMII Clock by switching to 6-wire mode */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIITYPE);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN | TI_PHY_REGCR_DEVAD_DATAEN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIICLK_EN);
}
}
}

View File

@ -0,0 +1,102 @@
/*
* Copyright (C) 2018 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XEMAC_IEEE_REGS_H_
#define __XEMAC_IEEE_REGS_H_
/* Advertisement control register. */
#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */
#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */
#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */
#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */
#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
#define ADVERTISE_100_AND_10 (ADVERTISE_10FULL | ADVERTISE_100FULL | \
ADVERTISE_10HALF | ADVERTISE_100HALF)
#define ADVERTISE_100 (ADVERTISE_100FULL | ADVERTISE_100HALF)
#define ADVERTISE_10 (ADVERTISE_10FULL | ADVERTISE_10HALF)
#define ADVERTISE_1000 0x0300
#define IEEE_CONTROL_REG_OFFSET 0
#define IEEE_STATUS_REG_OFFSET 1
#define IEEE_AUTONEGO_ADVERTISE_REG 4
#define IEEE_PARTNER_ABILITIES_1_REG_OFFSET 5
#define IEEE_PARTNER_ABILITIES_2_REG_OFFSET 8
#define IEEE_PARTNER_ABILITIES_3_REG_OFFSET 10
#define IEEE_1000_ADVERTISE_REG_OFFSET 9
#define IEEE_MMD_ACCESS_CONTROL_REG 13
#define IEEE_MMD_ACCESS_ADDRESS_DATA_REG 14
#define IEEE_COPPER_SPECIFIC_CONTROL_REG 16
#define IEEE_SPECIFIC_STATUS_REG 17
#define IEEE_COPPER_SPECIFIC_STATUS_REG_2 19
#define IEEE_EXT_PHY_SPECIFIC_CONTROL_REG 20
#define IEEE_CONTROL_REG_MAC 21
#define IEEE_PAGE_ADDRESS_REGISTER 22
#define IEEE_CTRL_1GBPS_LINKSPEED_MASK 0x2040
#define IEEE_CTRL_LINKSPEED_MASK 0x0040
#define IEEE_CTRL_LINKSPEED_1000M 0x0040
#define IEEE_CTRL_LINKSPEED_100M 0x2000
#define IEEE_CTRL_LINKSPEED_10M 0x0000
#define IEEE_CTRL_FULL_DUPLEX 0x100
#define IEEE_CTRL_RESET_MASK 0x8000
#define IEEE_CTRL_AUTONEGOTIATE_ENABLE 0x1000
#define IEEE_STAT_AUTONEGOTIATE_CAPABLE 0x0008
#define IEEE_STAT_AUTONEGOTIATE_COMPLETE 0x0020
#define IEEE_STAT_AUTONEGOTIATE_RESTART 0x0200
#define IEEE_STAT_LINK_STATUS 0x0004
#define IEEE_STAT_1GBPS_EXTENSIONS 0x0100
#define IEEE_AN1_ABILITY_MASK 0x1FE0
#define IEEE_AN3_ABILITY_MASK_1GBPS 0x0C00
#define IEEE_AN1_ABILITY_MASK_100MBPS 0x0380
#define IEEE_AN1_ABILITY_MASK_10MBPS 0x0060
#define IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK 0x0030
#define IEEE_SPEED_MASK 0xC000
#define IEEE_SPEED_1000 0x8000
#define IEEE_SPEED_100 0x4000
#define IEEE_ASYMMETRIC_PAUSE_MASK 0x0800
#define IEEE_PAUSE_MASK 0x0400
#define IEEE_AUTONEG_ERROR_MASK 0x8000
#define IEEE_MMD_ACCESS_CTRL_DEVAD_MASK 0x1F
#define IEEE_MMD_ACCESS_CTRL_PIDEVAD_MASK 0x801F
#define IEEE_MMD_ACCESS_CTRL_NOPIDEVAD_MASK 0x401F
#endif /* __XEMAC_IEEE_REGS_H_ */

View File

@ -0,0 +1,873 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#include "xlwipconfig.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#include "lwip/timeouts.h"
#endif
#include <stdio.h>
#include <string.h>
#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "netif/etharp.h"
#include "netif/xadapter.h"
#include "netif/xemacliteif.h"
#include "xstatus.h"
#include "netif/xpqueue.h"
#include "xlwipconfig.h"
#include "xparameters.h"
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#include "xscugic.h"
#define INTC_DIST_BASE_ADDR XPAR_SCUGIC_DIST_BASEADDR
#else
#include "xintc.h"
#endif
#else
#include "xinterrupt_wrap.h"
#endif
/* Define those to better describe your network interface. */
#define IFNAME0 'x'
#define IFNAME1 'e'
/* Advertisement control register. */
#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */
#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */
#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */
#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */
#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
#define ADVERTISE_100_AND_10 (ADVERTISE_10FULL | ADVERTISE_100FULL | \
ADVERTISE_10HALF | ADVERTISE_100HALF)
#define ADVERTISE_100 (ADVERTISE_100FULL | ADVERTISE_100HALF)
#define ADVERTISE_10 (ADVERTISE_10FULL | ADVERTISE_10HALF)
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#define EMACLITE_INTR_PRIORITY_SET_IN_GIC 0xA0
#define TRIG_TYPE_RISING_EDGE_SENSITIVE 0x3
#endif
#define IEEE_CONTROL_REG_OFFSET 0
#define IEEE_STATUS_REG_OFFSET 1
#define IEEE_AUTONEGO_ADVERTISE_REG 4
#define IEEE_PARTNER_ABILITIES_1_REG_OFFSET 5
#define IEEE_PARTNER_ABILITIES_2_REG_OFFSET 8
#define IEEE_PARTNER_ABILITIES_3_REG_OFFSET 10
#define IEEE_1000_ADVERTISE_REG_OFFSET 9
#define IEEE_CTRL_1GBPS_LINKSPEED_MASK 0x2040
#define IEEE_CTRL_LINKSPEED_MASK 0x0040
#define IEEE_CTRL_LINKSPEED_1000M 0x0040
#define IEEE_CTRL_LINKSPEED_100M 0x2000
#define IEEE_CTRL_LINKSPEED_10M 0x0000
#define IEEE_CTRL_RESET_MASK 0x8000
#define IEEE_CTRL_AUTONEGOTIATE_ENABLE 0x1000
#define IEEE_STAT_AUTONEGOTIATE_CAPABLE 0x0008
#define IEEE_STAT_AUTONEGOTIATE_COMPLETE 0x0020
#define IEEE_STAT_AUTONEGOTIATE_RESTART 0x0200
#define IEEE_STAT_1GBPS_EXTENSIONS 0x0100
#define IEEE_AN1_ABILITY_MASK 0x1FE0
#define IEEE_AN3_ABILITY_MASK_1GBPS 0x0C00
#define IEEE_AN1_ABILITY_MASK_100MBPS 0x0380
#define IEEE_AN1_ABILITY_MASK_10MBPS 0x0060
#define PHY_DETECT_REG 1
#define PHY_DETECT_MASK 0x1808
/* Forward declarations. */
static err_t xemacliteif_output(struct netif *netif, struct pbuf *p,
const ip_addr_t *ipaddr);
unsigned get_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep);
unsigned configure_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep, unsigned speed);
/* The payload from multiple pbufs is assembled into a single contiguous
* area for transmission. Currently this is a global variable (it should really
* belong in the per netif structure), but that is ok since this can be used
* only in a protected context
*/
unsigned char xemac_tx_frame[XEL_MAX_FRAME_SIZE] __attribute__((aligned(64)));
#if !NO_SYS
extern u32 xInsideISR;
#endif
#ifndef XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Function Prototypes *********************************************/
/*
* Function prototypes of the functions used for registering Fast
* Interrupt Handlers
*/
static void XEmacLite_FastInterruptHandler(void)
__attribute__ ((fast_interrupt));
/**************** Variable Declarations **************************************/
/** Variables for Fast Interrupt handlers ***/
XEmacLite *xemaclitep_fast;
#endif
#endif
static void
xemacif_recv_handler(void *arg) {
struct xemac_s *xemac = (struct xemac_s *)(arg);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
XEmacLite *instance = xemacliteif->instance;
struct pbuf *p;
int len = 0;
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#if !NO_SYS
xInsideISR++;
#endif
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#else
#ifndef SDT
XIntc_AckIntr(xtopologyp->intc_baseaddr, 1 << xtopologyp->intc_emac_intr);
#endif
#endif
p = pbuf_alloc(PBUF_RAW, XEL_MAX_FRAME_SIZE, PBUF_POOL);
if (!p) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
/* receive and just ignore the frame.
* we need to receive the frame because otherwise emaclite will
* not generate any other interrupts since it cannot receive,
* and we do not actively poll the emaclite
*/
XEmacLite_Recv(instance, xemac_tx_frame);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
/* receive the packet */
len = XEmacLite_Recv(instance, p->payload);
if (len == 0) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
pbuf_free(p);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
/* store it in the receive queue, where it'll be processed by xemacif input thread */
if (pq_enqueue(xemacliteif->recv_q, (void*)p) < 0) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
pbuf_free(p);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
#if !NO_SYS
sys_sem_signal(&xemac->sem_rx_data_available);
xInsideISR--;
#endif
}
int transmit_packet(XEmacLite *instancep, void *packet, unsigned len)
{
XStatus result = 0;
/* there is space for a buffer, so transfer */
result = XEmacLite_Send(instancep, packet, len);
if (result != XST_SUCCESS) {
return -1;
}
return 0;
}
/*
* this function is always called with interrupts off
* this function also assumes that there is space to send in the Emaclite buffer
*/
static err_t
_unbuffered_low_level_output(XEmacLite *instancep, struct pbuf *p)
{
struct pbuf *q;
int total_len = 0;
#if ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
for(q = p, total_len = 0; q != NULL; q = q->next) {
/* Send the data from the pbuf to the interface, one pbuf at a
time. The size of the data in each pbuf is kept in the ->len
variable. */
memcpy(xemac_tx_frame + total_len, q->payload, q->len);
total_len += q->len;
}
if (transmit_packet(instancep, xemac_tx_frame, total_len) < 0) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
}
#if ETH_PAD_SIZE
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
#if LINK_STATS
lwip_stats.link.xmit++;
#endif /* LINK_STATS */
return ERR_OK;
}
/*
* low_level_output():
*
* Should do the actual transmission of the packet. The packet is
* contained in the pbuf that is passed to the function. This pbuf
* might be chained.
*
*/
static err_t
low_level_output(struct netif *netif, struct pbuf *p)
{
SYS_ARCH_DECL_PROTECT(lev);
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
XEmacLite *instance = xemacliteif->instance;
struct pbuf *q;
SYS_ARCH_PROTECT(lev);
/* check if space is available to send */
if (XEmacLite_TxBufferAvailable(instance) == TRUE) {
if (pq_qlength(xemacliteif->send_q)) { /* send backlog */
_unbuffered_low_level_output(instance, (struct pbuf *)pq_dequeue(xemacliteif->send_q));
} else { /* send current */
_unbuffered_low_level_output(instance, p);
SYS_ARCH_UNPROTECT(lev);
return ERR_OK;
}
}
/* if we cannot send the packet immediately, then make a copy of the whole packet
* into a separate pbuf and store it in send_q. We cannot enqueue the pbuf as is
* since parts of the pbuf may be modified inside lwIP.
*/
q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_POOL);
if (!q) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
SYS_ARCH_UNPROTECT(lev);
return ERR_MEM;
}
for (q->len = 0; p; p = p->next) {
memcpy(q->payload + q->len, p->payload, p->len);
q->len += p->len;
}
if (pq_enqueue(xemacliteif->send_q, (void *)q) < 0) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
SYS_ARCH_UNPROTECT(lev);
return ERR_MEM;
}
SYS_ARCH_UNPROTECT(lev);
return ERR_OK;
}
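/*
 * TX-done interrupt: if packets were queued in send_q while the hardware
 * buffers were busy, push the next one out now that a buffer is free.
 */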
static void
xemacif_send_handler(void *arg) {
struct xemac_s *xemac = (struct xemac_s *)(arg);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
XEmacLite *instance = xemacliteif->instance;
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#if !NO_SYS
xInsideISR++;
#endif
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#else
#ifndef SDT
XIntc_AckIntr(xtopologyp->intc_baseaddr, 1 << xtopologyp->intc_emac_intr);
#endif
#endif
if (pq_qlength(xemacliteif->send_q) && (XEmacLite_TxBufferAvailable(instance) == TRUE)) {
struct pbuf *p = pq_dequeue(xemacliteif->send_q);
_unbuffered_low_level_output(instance, p);
pbuf_free(p);
}
#if !NO_SYS
xInsideISR--;
#endif
}
/*
* low_level_input():
*
* Should allocate a pbuf and transfer the bytes of the incoming
* packet from the interface into the pbuf.
*
*/
static struct pbuf *
low_level_input(struct netif *netif)
{
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
/* see if there is data to process */
if (pq_qlength(xemacliteif->recv_q) == 0)
return NULL;
/* return one packet from receive q */
return (struct pbuf *)pq_dequeue(xemacliteif->recv_q);
}
/*
* xemacliteif_output():
*
* This function is called by the TCP/IP stack when an IP packet
* should be sent. It calls the function called low_level_output() to
* do the actual transmission of the packet.
*
*/
err_t
xemacliteif_output(struct netif *netif, struct pbuf *p,
const ip_addr_t *ipaddr)
{
/* resolve hardware address, then send (or queue) packet */
return etharp_output(netif, p, ipaddr);
}
/*
* xemacliteif_input():
*
* This function should be called when a packet is ready to be read
* from the interface. It uses the function low_level_input() that
* should handle the actual reception of bytes from the network
* interface.
*
* Returns the number of packets read (max 1 packet on success,
* 0 if there are no packets)
*
*/
int
xemacliteif_input(struct netif *netif)
{
struct eth_hdr *ethhdr;
struct pbuf *p;
SYS_ARCH_DECL_PROTECT(lev);
#if !NO_SYS
while (1)
#endif
{
SYS_ARCH_PROTECT(lev);
/* move received packet into a new pbuf */
p = low_level_input(netif);
SYS_ARCH_UNPROTECT(lev);
/* no packet could be read, silently ignore this */
if (p == NULL)
return 0;
/* points to packet payload, which starts with an Ethernet header */
ethhdr = p->payload;
#if LINK_STATS
lwip_stats.link.recv++;
#endif /* LINK_STATS */
switch (htons(ethhdr->type)) {
/* IP or ARP packet? */
case ETHTYPE_IP:
case ETHTYPE_ARP:
#if PPPOE_SUPPORT
/* PPPoE packet? */
case ETHTYPE_PPPOEDISC:
case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
/* full packet send to tcpip_thread to process */
if (netif->input(p, netif) != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("xlltemacif_input: IP input error\r\n"));
pbuf_free(p);
p = NULL;
}
break;
default:
pbuf_free(p);
p = NULL;
break;
}
}
return 1;
}
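/*
 * Example (illustrative sketch, not part of the driver): in a NO_SYS /
 * raw-API build, the application's main loop would typically poll
 * xemacliteif_input() and drive lwIP's timers. application_poll_loop and
 * main_netif are hypothetical application-side names; sys_check_timeouts()
 * comes from "lwip/timeouts.h".
 */
#if 0
static void application_poll_loop(struct netif *main_netif)
{
	while (1) {
		/* drain packets queued by the receive interrupt handler */
		xemacliteif_input(main_netif);
		/* run due lwIP timeouts (ARP timer, TCP timers, ...) */
		sys_check_timeouts();
	}
}
#endif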
#if !NO_SYS
static void
arp_timer(void *arg)
{
etharp_tmr();
sys_timeout(ARP_TMR_INTERVAL, arp_timer, NULL);
}
#endif
static XEmacLite_Config *
xemaclite_lookup_config(unsigned base)
{
XEmacLite_Config *CfgPtr = NULL;
int i;
for (i = 0; i < XPAR_XEMACLITE_NUM_INSTANCES; i++)
if (XEmacLite_ConfigTable[i].BaseAddress == base) {
CfgPtr = &XEmacLite_ConfigTable[i];
break;
}
return CfgPtr;
}
static err_t low_level_init(struct netif *netif)
{
struct xemac_s *xemac;
XEmacLite_Config *config;
XEmacLite *xemaclitep;
struct xtopology_t *xtopologyp;
xemacliteif_s *xemacliteif;
unsigned link_speed = 1000;
xemaclitep = mem_malloc(sizeof *xemaclitep);
#ifndef XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ
#if XPAR_INTC_0_HAS_FAST == 1
xemaclitep_fast = xemaclitep;
#endif
#endif
if (xemaclitep == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacliteif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac = mem_malloc(sizeof *xemac);
if (xemac == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacliteif_init: out of memory\r\n"));
return ERR_MEM;
}
xemacliteif = mem_malloc(sizeof *xemacliteif);
if (xemacliteif == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacliteif_init: out of memory\r\n"));
return ERR_MEM;
}
/* obtain pointer to topology structure for this emac */
xemac->topology_index = xtopology_find_index((unsigned)(netif->state));
xtopologyp = &xtopology[xemac->topology_index];
/* obtain config of this emaclite */
config = xemaclite_lookup_config((unsigned)(netif->state));
/* maximum transfer unit */
netif->mtu = XEL_MTU_SIZE;
/* broadcast capability */
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP;
/* initialize the mac */
#ifndef SDT
XEmacLite_Initialize(xemaclitep, config->DeviceId);
#else
XEmacLite_Initialize(xemaclitep, config->BaseAddress);
#endif
xemaclitep->NextRxBufferToUse = 0;
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xtopologyp->intc_emac_intr,
(Xil_ExceptionHandler)XEmacLite_InterruptHandler,
xemaclitep);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xtopologyp->intc_emac_intr,
EMACLITE_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xtopologyp->intc_emac_intr);
#else
#if NO_SYS
#if XPAR_INTC_0_HAS_FAST == 1
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XFastInterruptHandler)XEmacLite_FastInterruptHandler);
#else
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XInterruptHandler)XEmacLite_InterruptHandler,
xemaclitep);
#endif
#else
#if XPAR_INTC_0_HAS_FAST == 1
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XFastInterruptHandler)XEmacLite_FastInterruptHandler);
XIntc_EnableIntr(xtopologyp->intc_baseaddr, XIntc_In32(xtopologyp->intc_baseaddr +
XIN_IER_OFFSET) | (1 << xtopologyp->intc_emac_intr));
#else
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XInterruptHandler)XEmacLite_InterruptHandler,
xemaclitep);
XIntc_EnableIntr(xtopologyp->intc_baseaddr, XIntc_In32(xtopologyp->intc_baseaddr +
XIN_IER_OFFSET) | (1 << xtopologyp->intc_emac_intr));
#endif
#endif
#endif
#else
XSetupInterruptSystem(xemaclitep, &XEmacLite_InterruptHandler,
config->IntrId,
config->IntrParent,
XINTERRUPT_DEFAULT_PRIORITY);
#endif
/* set mac address */
XEmacLite_SetMacAddress(xemaclitep, (unsigned char*)(netif->hwaddr));
/* flush any frames already received */
XEmacLite_FlushReceive(xemaclitep);
/* set Rx, Tx interrupt handlers */
XEmacLite_SetRecvHandler(xemaclitep, (void *)(xemac), xemacif_recv_handler);
XEmacLite_SetSendHandler(xemaclitep, (void *)(xemac), xemacif_send_handler);
/* enable Rx, Tx interrupts */
XEmacLite_EnableInterrupts(xemaclitep);
#if !NO_SYS
sys_sem_new(&xemac->sem_rx_data_available, 0);
#endif
	/* replace the state in netif (currently the emaclite base address)
	 * with the xemac instance pointer; its state field in turn points
	 * to the xemacliteif structure holding the driver instance
	 */
xemac->type = xemac_type_xps_emaclite;
xemac->state = (void *)xemacliteif;
netif->state = (void *)xemac;
xemacliteif->instance = xemaclitep;
xemacliteif->recv_q = pq_create_queue();
if (!xemacliteif->recv_q)
return ERR_MEM;
xemacliteif->send_q = pq_create_queue();
if (!xemacliteif->send_q)
return ERR_MEM;
/* Initialize PHY */
/* set PHY <--> MAC data clock */
#ifdef CONFIG_LINKSPEED_AUTODETECT
link_speed = get_IEEE_phy_speed_emaclite(xemaclitep);
xil_printf("auto-negotiated link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED1000)
xil_printf("Link speed of 1000 Mbps not possible\r\n");
#elif defined(CONFIG_LINKSPEED100)
link_speed = 100;
configure_IEEE_phy_speed_emaclite(xemaclitep, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED10)
link_speed = 10;
configure_IEEE_phy_speed_emaclite(xemaclitep, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#endif
return ERR_OK;
}
static int detect_phy_emaclite(XEmacLite *xemaclitep)
{
u16 phy_reg;
u32 phy_addr;
for (phy_addr = 31; phy_addr > 0; phy_addr--) {
XEmacLite_PhyRead(xemaclitep, phy_addr, PHY_DETECT_REG, &phy_reg);
if ((phy_reg != 0xFFFF) &&
((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
/* Found a valid PHY address */
LWIP_DEBUGF(NETIF_DEBUG, ("XEMacLite detect_phy: PHY detected at address %d.\r\n", phy_addr));
LWIP_DEBUGF(NETIF_DEBUG, ("XEMacLite detect_phy: PHY detected.\r\n"));
return phy_addr;
}
}
LWIP_DEBUGF(NETIF_DEBUG, ("XEMacLite detect_phy: No PHY detected. Assuming a PHY at address 0\r\n"));
/* default to zero */
return 0;
}
unsigned get_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep)
{
u16 control;
u16 status;
u16 partner_capabilities;
u16 partner_capabilities_1000;
u16 phylinkspeed;
u32 phy_addr = detect_phy_emaclite(xemaclitep);
/* Don't advertise PHY speed of 1000 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Advertise PHY speed of 100 and 10 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100_AND_10);
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
control |= (IEEE_CTRL_AUTONEGOTIATE_ENABLE |
IEEE_STAT_AUTONEGOTIATE_RESTART);
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
control);
	/* Read back the PHY control and status registers. */
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_STATUS_REG_OFFSET,
&status);
if ((control & IEEE_CTRL_AUTONEGOTIATE_ENABLE) &&
(status & IEEE_STAT_AUTONEGOTIATE_CAPABLE)) {
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_STATUS_REG_OFFSET,
&status);
}
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET,
&partner_capabilities);
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_PARTNER_ABILITIES_3_REG_OFFSET,
&partner_capabilities_1000);
if (partner_capabilities_1000 & IEEE_AN3_ABILITY_MASK_1GBPS) return 1000;
}
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_100MBPS) return 100;
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_10MBPS) return 10;
xil_printf("%s: unknown PHY link speed, setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__);
return 10;
} else {
/* Update TEMAC speed accordingly */
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
/* Get commanded link speed */
phylinkspeed = control & IEEE_CTRL_1GBPS_LINKSPEED_MASK;
switch (phylinkspeed) {
case (IEEE_CTRL_LINKSPEED_1000M):
return 1000;
case (IEEE_CTRL_LINKSPEED_100M):
return 100;
case (IEEE_CTRL_LINKSPEED_10M):
return 10;
default:
xil_printf("%s: unknown PHY link speed (%d), setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__, phylinkspeed);
return 10;
}
} else {
return (control & IEEE_CTRL_LINKSPEED_MASK) ? 100 : 10;
}
}
}
unsigned configure_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep, unsigned speed)
{
u16 control;
u32 phy_addr = detect_phy_emaclite(xemaclitep);
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
control &= ~IEEE_CTRL_LINKSPEED_100M;
control &= ~IEEE_CTRL_LINKSPEED_10M;
if (speed == 100) {
control |= IEEE_CTRL_LINKSPEED_100M;
/* Don't advertise PHY speed of 1000 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 10 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100);
}
else if (speed == 10) {
control |= IEEE_CTRL_LINKSPEED_10M;
/* Don't advertise PHY speed of 1000 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 100 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_10);
}
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_RESET_MASK);
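	/* crude busy-wait giving the PHY time to complete the reset requested
	 * above; the delay is CPU-frequency dependent and deliberately generous */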
{
volatile int wait;
for (wait=0; wait < 100000; wait++);
for (wait=0; wait < 100000; wait++);
}
return 0;
}
/*
* xemacliteif_init():
*
* Should be called at the beginning of the program to set up the
* network interface. It calls the function low_level_init() to do the
* actual setup of the hardware.
*
*/
err_t
xemacliteif_init(struct netif *netif)
{
#if LWIP_SNMP
/* ifType ethernetCsmacd(6) @see RFC1213 */
netif->link_type = 6;
	/* your link speed here, in bits per second (placeholder: 100 Mbps;
	 * fill in the actual negotiated speed) */
	netif->link_speed = 100000000;
netif->ts = 0;
netif->ifinoctets = 0;
netif->ifinucastpkts = 0;
netif->ifinnucastpkts = 0;
netif->ifindiscards = 0;
netif->ifoutoctets = 0;
netif->ifoutucastpkts = 0;
netif->ifoutnucastpkts = 0;
netif->ifoutdiscards = 0;
#endif
netif->name[0] = IFNAME0;
netif->name[1] = IFNAME1;
netif->output = xemacliteif_output;
netif->linkoutput = low_level_output;
low_level_init(netif);
#if !NO_SYS
sys_timeout(ARP_TMR_INTERVAL, arp_timer, NULL);
#endif
return ERR_OK;
}
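/*
 * Example (illustrative sketch): a typical bring-up of this interface. The
 * emaclite base address is passed as the initial netif->state and is swapped
 * for the driver instance inside low_level_init(). EMACLITE_BASEADDR, the
 * MAC and the IP addresses are placeholders; ethernet_input() is the raw-API
 * input function from "netif/ethernet.h".
 */
#if 0
static struct netif eth0;

void example_add_emaclite_netif(void)
{
	static unsigned char mac[6] = { 0x00, 0x0a, 0x35, 0x00, 0x01, 0x02 };
	ip4_addr_t ipaddr, netmask, gw;

	IP4_ADDR(&ipaddr, 192, 168, 1, 10);
	IP4_ADDR(&netmask, 255, 255, 255, 0);
	IP4_ADDR(&gw, 192, 168, 1, 1);
	/* hwaddr must be set first: low_level_init() programs it into the
	 * MAC via XEmacLite_SetMacAddress() */
	memcpy(eth0.hwaddr, mac, 6);
	eth0.hwaddr_len = 6;
	netif_add(&eth0, &ipaddr, &netmask, &gw,
		  (void *)EMACLITE_BASEADDR, xemacliteif_init, ethernet_input);
	netif_set_default(&eth0);
	netif_set_up(&eth0);
}
#endif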
#ifndef XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ
#if XPAR_INTC_0_HAS_FAST == 1
/****************** Fast Interrupt Handler **********************************/
void XEmacLite_FastInterruptHandler (void)
{
XEmacLite_InterruptHandler((void *)xemaclitep_fast);
}
#endif
#endif


@ -0,0 +1,796 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include <stdio.h>
#include <string.h>
#include <xparameters.h>
#include "lwipopts.h"
#include "xlwipconfig.h"
#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/igmp.h"
#include "netif/etharp.h"
#include "netif/xemacpsif.h"
#include "netif/xadapter.h"
#include "netif/xpqueue.h"
#include "xparameters.h"
#include "xscugic.h"
#include "xemacps.h"
#if LWIP_IPV6
#include "lwip/ethip6.h"
#endif
/* Define those to better describe your network interface. */
#define IFNAME0 't'
#define IFNAME1 'e'
#if LWIP_IGMP
static err_t xemacpsif_mac_filter_update (struct netif *netif,
ip_addr_t *group, u8_t action);
static u8_t xemacps_mcast_entry_mask = 0;
#endif
#if LWIP_IPV6 && LWIP_IPV6_MLD
static err_t xemacpsif_mld6_mac_filter_update (struct netif *netif,
ip_addr_t *group, u8_t action);
static u8_t xemacps_mld6_mcast_entry_mask;
#endif
XEmacPs_Config *mac_config;
struct netif *NetIf;
#if !NO_SYS
#if defined(__arm__) && !defined(ARMR5)
int32_t lExpireCounter = 0;
#define RESETRXTIMEOUT 10
#endif
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
extern volatile u32_t notifyinfo[4*XLWIP_CONFIG_N_TX_DESC];
#endif
/*
* this function is always called with interrupts off
* this function also assumes that there are available BD's
*/
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
static err_t _unbuffered_low_level_output(xemacpsif_s *xemacpsif,
struct pbuf *p, u32_t block_till_tx_complete, u32_t *to_block_index )
#else
static err_t _unbuffered_low_level_output(xemacpsif_s *xemacpsif,
struct pbuf *p)
#endif
{
XStatus status = 0;
err_t err = ERR_MEM;
#if ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (block_till_tx_complete == 1) {
status = emacps_sgsend(xemacpsif, p, 1, to_block_index);
} else {
status = emacps_sgsend(xemacpsif, p, 0, to_block_index);
}
#else
status = emacps_sgsend(xemacpsif, p);
#endif
if (status != XST_SUCCESS) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
} else {
err = ERR_OK;
}
#if ETH_PAD_SIZE
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
#if LINK_STATS
lwip_stats.link.xmit++;
#endif /* LINK_STATS */
return err;
}
/*
* low_level_output():
*
* Should do the actual transmission of the packet. The packet is
* contained in the pbuf that is passed to the function. This pbuf
* might be chained.
*
*/
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
err_t err = ERR_MEM;
s32_t freecnt;
XEmacPs_BdRing *txring;
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
u32_t notfifyblocksleepcntr;
u32_t to_block_index;
#endif
SYS_ARCH_DECL_PROTECT(lev);
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
SYS_ARCH_PROTECT(lev);
/* check if space is available to send */
freecnt = xemacps_is_tx_space_available(xemacpsif);
if (freecnt <= 5) {
txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
xemacps_process_sent_bds(xemacpsif, txring);
}
if (xemacps_is_tx_space_available(xemacpsif)) {
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
err = _unbuffered_low_level_output(xemacpsif, p, 1, &to_block_index);
} else {
err = _unbuffered_low_level_output(xemacpsif, p, 0, &to_block_index);
}
#else
err = _unbuffered_low_level_output(xemacpsif, p);
#endif
} else {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
xil_printf("pack dropped, no space\r\n");
SYS_ARCH_UNPROTECT(lev);
goto return_pack_dropped;
}
SYS_ARCH_UNPROTECT(lev);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
/* Wait for approx 1 second before timing out */
notfifyblocksleepcntr = 900000;
while(notifyinfo[to_block_index] == 1) {
usleep(1);
notfifyblocksleepcntr--;
			if (notfifyblocksleepcntr == 0) { /* counter is unsigned; test for zero */
err = ERR_TIMEOUT;
break;
}
}
}
netif_clear_opt_block_tx(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET);
#endif
return_pack_dropped:
return err;
}
/*
* low_level_input():
*
* Should allocate a pbuf and transfer the bytes of the incoming
* packet from the interface into the pbuf.
*
*/
static struct pbuf * low_level_input(struct netif *netif)
{
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
struct pbuf *p;
/* see if there is data to process */
if (pq_qlength(xemacpsif->recv_q) == 0)
return NULL;
/* return one packet from receive q */
p = (struct pbuf *)pq_dequeue(xemacpsif->recv_q);
return p;
}
/*
* xemacpsif_output():
*
* This function is called by the TCP/IP stack when an IP packet
* should be sent. It calls the function called low_level_output() to
* do the actual transmission of the packet.
*
*/
static err_t xemacpsif_output(struct netif *netif, struct pbuf *p,
const ip_addr_t *ipaddr)
{
/* resolve hardware address, then send (or queue) packet */
return etharp_output(netif, p, ipaddr);
}
/*
* xemacpsif_input():
*
* This function should be called when a packet is ready to be read
* from the interface. It uses the function low_level_input() that
* should handle the actual reception of bytes from the network
* interface.
*
* Returns the number of packets read (max 1 packet on success,
* 0 if there are no packets)
*
*/
s32_t xemacpsif_input(struct netif *netif)
{
struct eth_hdr *ethhdr;
struct pbuf *p;
SYS_ARCH_DECL_PROTECT(lev);
#if !NO_SYS
while (1)
#endif
{
/* move received packet into a new pbuf */
SYS_ARCH_PROTECT(lev);
p = low_level_input(netif);
SYS_ARCH_UNPROTECT(lev);
/* no packet could be read, silently ignore this */
if (p == NULL) {
return 0;
}
/* points to packet payload, which starts with an Ethernet header */
ethhdr = p->payload;
#if LINK_STATS
lwip_stats.link.recv++;
#endif /* LINK_STATS */
switch (htons(ethhdr->type)) {
/* IP or ARP packet? */
case ETHTYPE_IP:
case ETHTYPE_ARP:
#if LWIP_IPV6
/*IPv6 Packet?*/
case ETHTYPE_IPV6:
#endif
#if PPPOE_SUPPORT
/* PPPoE packet? */
case ETHTYPE_PPPOEDISC:
case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
/* full packet send to tcpip_thread to process */
if (netif->input(p, netif) != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacpsif_input: IP input error\r\n"));
pbuf_free(p);
p = NULL;
}
break;
default:
pbuf_free(p);
p = NULL;
break;
}
}
return 1;
}
#if !NO_SYS
#if defined(__arm__) && !defined(ARMR5)
void vTimerCallback( TimerHandle_t pxTimer )
{
	/* the timer handle passed in must be valid */
	configASSERT(pxTimer);
	lExpireCounter++;
	/* once the timer has expired RESETRXTIMEOUT times with no Rx data, reset RX */
	if(lExpireCounter >= RESETRXTIMEOUT) {
lExpireCounter = 0;
xemacpsif_resetrx_on_no_rxdata(NetIf);
}
}
#endif
#endif
static err_t low_level_init(struct netif *netif)
{
UINTPTR mac_address = (UINTPTR)(netif->state);
struct xemac_s *xemac;
xemacpsif_s *xemacpsif;
u32 dmacrreg;
s32_t status = XST_SUCCESS;
NetIf = netif;
xemacpsif = mem_malloc(sizeof *xemacpsif);
if (xemacpsif == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacpsif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac = mem_malloc(sizeof *xemac);
if (xemac == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacpsif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac->state = (void *)xemacpsif;
xemac->topology_index = xtopology_find_index(mac_address);
xemac->type = xemac_type_emacps;
xemacpsif->send_q = NULL;
xemacpsif->recv_q = pq_create_queue();
if (!xemacpsif->recv_q)
return ERR_MEM;
/* maximum transfer unit */
#ifdef ZYNQMP_USE_JUMBO
netif->mtu = XEMACPS_MTU_JUMBO - XEMACPS_HDR_SIZE;
#else
netif->mtu = XEMACPS_MTU - XEMACPS_HDR_SIZE;
#endif
#if LWIP_IGMP
netif->igmp_mac_filter = xemacpsif_mac_filter_update;
#endif
#if LWIP_IPV6 && LWIP_IPV6_MLD
netif->mld_mac_filter = xemacpsif_mld6_mac_filter_update;
#endif
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP |
NETIF_FLAG_LINK_UP;
#if LWIP_IPV6 && LWIP_IPV6_MLD
netif->flags |= NETIF_FLAG_MLD6;
#endif
#if LWIP_IGMP
netif->flags |= NETIF_FLAG_IGMP;
#endif
#if !NO_SYS
sys_sem_new(&xemac->sem_rx_data_available, 0);
#endif
/* obtain config of this emac */
mac_config = (XEmacPs_Config *)xemacps_lookup_config((unsigned)(UINTPTR)netif->state);
#if defined (__aarch64__) && (EL1_NONSECURE == 1)
/* Request device to indicate that this library is using it */
if (mac_config->BaseAddress == VERSAL_EMACPS_0_BASEADDR) {
Xil_Smc(PM_REQUEST_DEVICE_SMC_FID, DEV_GEM_0, 1, 0, 100, 1, 0, 0);
}
	if (mac_config->BaseAddress == VERSAL_EMACPS_1_BASEADDR) {
Xil_Smc(PM_REQUEST_DEVICE_SMC_FID, DEV_GEM_1, 1, 0, 100, 1, 0, 0);
}
#endif
status = XEmacPs_CfgInitialize(&xemacpsif->emacps, mac_config,
mac_config->BaseAddress);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("In %s:EmacPs Configuration Failed....\r\n", __func__));
}
/* initialize the mac */
init_emacps(xemacpsif, netif);
dmacrreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_DMACR_OFFSET);
dmacrreg = dmacrreg | (0x00000010);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_DMACR_OFFSET, dmacrreg);
#if !NO_SYS
#if defined(__arm__) && !defined(ARMR5)
	/* the FreeRTOS tick is 10 ms by default; a period of 10 ticks gives the
	 * ~100 ms interval expected by the Rx-reset workaround */
xemac->xTimer = xTimerCreate("Timer", 10, pdTRUE, ( void * ) 1, vTimerCallback);
if (xemac->xTimer == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("In %s:Timer creation failed....\r\n", __func__));
} else {
if(xTimerStart(xemac->xTimer, 0) != pdPASS) {
LWIP_DEBUGF(NETIF_DEBUG, ("In %s:Timer start failed....\r\n", __func__));
}
}
#endif
#endif
setup_isr(xemac);
init_dma(xemac);
start_emacps(xemacpsif);
/* replace the state in netif (currently the emac baseaddress)
* with the mac instance pointer.
*/
netif->state = (void *)xemac;
return ERR_OK;
}
void HandleEmacPsError(struct xemac_s *xemac)
{
xemacpsif_s *xemacpsif;
s32_t status = XST_SUCCESS;
u32 dmacrreg;
SYS_ARCH_DECL_PROTECT(lev);
SYS_ARCH_PROTECT(lev);
xemacpsif = (xemacpsif_s *)(xemac->state);
free_txrx_pbufs(xemacpsif);
status = XEmacPs_CfgInitialize(&xemacpsif->emacps, mac_config,
mac_config->BaseAddress);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("In %s:EmacPs Configuration Failed....\r\n", __func__));
}
/* initialize the mac */
init_emacps_on_error(xemacpsif, NetIf);
dmacrreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_DMACR_OFFSET);
dmacrreg = dmacrreg | (0x01000000);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_DMACR_OFFSET, dmacrreg);
setup_isr(xemac);
init_dma(xemac);
start_emacps(xemacpsif);
SYS_ARCH_UNPROTECT(lev);
}
void HandleTxErrors(struct xemac_s *xemac)
{
xemacpsif_s *xemacpsif;
u32 netctrlreg;
SYS_ARCH_DECL_PROTECT(lev);
SYS_ARCH_PROTECT(lev);
xemacpsif = (xemacpsif_s *)(xemac->state);
netctrlreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET);
netctrlreg = netctrlreg & (~XEMACPS_NWCTRL_TXEN_MASK);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET, netctrlreg);
free_onlytx_pbufs(xemacpsif);
clean_dma_txdescs(xemac);
netctrlreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET);
netctrlreg = netctrlreg | (XEMACPS_NWCTRL_TXEN_MASK);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET, netctrlreg);
SYS_ARCH_UNPROTECT(lev);
}
#if LWIP_IPV6 && LWIP_IPV6_MLD
static u8_t xemacpsif_ip6_addr_ismulticast(ip6_addr_t* ip_addr)
{
if(ip6_addr_ismulticast_linklocal(ip_addr)||
ip6_addr_ismulticast_iflocal(ip_addr) ||
ip6_addr_ismulticast_adminlocal(ip_addr)||
ip6_addr_ismulticast_sitelocal(ip_addr) ||
ip6_addr_ismulticast_orglocal(ip_addr) ||
ip6_addr_ismulticast_global(ip_addr)) {
/*Return TRUE if IPv6 is Multicast type*/
return TRUE;
} else {
return FALSE;
}
}
static void xemacpsif_mld6_mac_hash_update (struct netif *netif, u8_t *ip_addr,
u8_t action)
{
u8_t multicast_mac_addr[6];
struct xemac_s *xemac = (struct xemac_s *) (netif->state);
xemacpsif_s *xemacpsif = (xemacpsif_s *) (xemac->state);
XEmacPs_BdRing *txring;
txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
multicast_mac_addr[0] = LL_IP6_MULTICAST_ADDR_0;
multicast_mac_addr[1] = LL_IP6_MULTICAST_ADDR_1;
multicast_mac_addr[2] = ip_addr[12];
multicast_mac_addr[3] = ip_addr[13];
multicast_mac_addr[4] = ip_addr[14];
multicast_mac_addr[5] = ip_addr[15];
/* Wait till all sent packets are acknowledged from HW */
while(txring->HwCnt);
SYS_ARCH_DECL_PROTECT(lev);
SYS_ARCH_PROTECT(lev);
/* Stop Ethernet */
XEmacPs_Stop(&xemacpsif->emacps);
if (action == NETIF_ADD_MAC_FILTER) {
		/* Set multicast mac address in hash table */
XEmacPs_SetHash(&xemacpsif->emacps, multicast_mac_addr);
} else if (action == NETIF_DEL_MAC_FILTER) {
		/* Remove multicast mac address from hash table */
XEmacPs_DeleteHash(&xemacpsif->emacps, multicast_mac_addr);
}
/* Reset DMA */
reset_dma(xemac);
/* Start Ethernet */
XEmacPs_Start(&xemacpsif->emacps);
SYS_ARCH_UNPROTECT(lev);
}
static err_t xemacpsif_mld6_mac_filter_update (struct netif *netif, ip_addr_t *group,
u8_t action)
{
u8_t temp_mask;
unsigned int i;
u8_t * ip_addr = (u8_t *) group;
if(!(xemacpsif_ip6_addr_ismulticast((ip6_addr_t*) ip_addr))) {
		LWIP_DEBUGF(NETIF_DEBUG,
			("%s: The requested MAC address is not a multicast address.\r\n", __func__));
		LWIP_DEBUGF(NETIF_DEBUG,
			("Multicast address add operation failure !!\r\n"));
return ERR_ARG;
}
if (action == NETIF_ADD_MAC_FILTER) {
for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xemacps_mld6_mcast_entry_mask & temp_mask) == temp_mask) {
continue;
}
xemacps_mld6_mcast_entry_mask |= temp_mask;
/* Update mac address in hash table */
xemacpsif_mld6_mac_hash_update(netif, ip_addr, action);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully added.\r\n", __func__));
return ERR_OK;
}
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers left.\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address add operation failure !!\r\n"));
return ERR_MEM;
} else if (action == NETIF_DEL_MAC_FILTER) {
for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xemacps_mld6_mcast_entry_mask & temp_mask) != temp_mask) {
continue;
}
xemacps_mld6_mcast_entry_mask &= (~temp_mask);
/* Update mac address in hash table */
xemacpsif_mld6_mac_hash_update(netif, ip_addr, action);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully removed.\r\n", __func__));
return ERR_OK;
}
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers present with\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("the requested Multicast MAC address.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address removal failure!!.\r\n"));
return ERR_MEM;
}
return ERR_ARG;
}
#endif
#if LWIP_IGMP
static void xemacpsif_mac_hash_update (struct netif *netif, u8_t *ip_addr,
u8_t action)
{
u8_t multicast_mac_addr[6];
struct xemac_s *xemac = (struct xemac_s *) (netif->state);
xemacpsif_s *xemacpsif = (xemacpsif_s *) (xemac->state);
XEmacPs_BdRing *txring;
txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
multicast_mac_addr[0] = 0x01;
multicast_mac_addr[1] = 0x00;
multicast_mac_addr[2] = 0x5E;
multicast_mac_addr[3] = ip_addr[1] & 0x7F;
multicast_mac_addr[4] = ip_addr[2];
multicast_mac_addr[5] = ip_addr[3];
/* Wait till all sent packets are acknowledged from HW */
while(txring->HwCnt);
SYS_ARCH_DECL_PROTECT(lev);
SYS_ARCH_PROTECT(lev);
/* Stop Ethernet */
XEmacPs_Stop(&xemacpsif->emacps);
if (action == IGMP_ADD_MAC_FILTER) {
		/* Set multicast mac address in hash table */
XEmacPs_SetHash(&xemacpsif->emacps, multicast_mac_addr);
} else if (action == IGMP_DEL_MAC_FILTER) {
		/* Remove multicast mac address from hash table */
XEmacPs_DeleteHash(&xemacpsif->emacps, multicast_mac_addr);
}
/* Reset DMA */
reset_dma(xemac);
/* Start Ethernet */
XEmacPs_Start(&xemacpsif->emacps);
SYS_ARCH_UNPROTECT(lev);
}
static err_t xemacpsif_mac_filter_update (struct netif *netif, ip_addr_t *group,
u8_t action)
{
u8_t temp_mask;
unsigned int i;
u8_t * ip_addr = (u8_t *) group;
	/* reject addresses outside the IPv4 multicast range 224.0.0.0 .. 239.255.255.255 */
	if ((ip_addr[0] < 224) || (ip_addr[0] > 239)) {
LWIP_DEBUGF(NETIF_DEBUG,
("%s: The requested MAC address is not a multicast address.\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast address add operation failure !!\r\n"));
return ERR_ARG;
}
if (action == IGMP_ADD_MAC_FILTER) {
for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xemacps_mcast_entry_mask & temp_mask) == temp_mask) {
continue;
}
xemacps_mcast_entry_mask |= temp_mask;
/* Update mac address in hash table */
xemacpsif_mac_hash_update(netif, ip_addr, action);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully added.\r\n", __func__));
return ERR_OK;
}
if (i == XEMACPS_MAX_MAC_ADDR) {
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers left.\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address add operation failure !!\r\n"));
return ERR_MEM;
}
} else if (action == IGMP_DEL_MAC_FILTER) {
for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xemacps_mcast_entry_mask & temp_mask) != temp_mask) {
continue;
}
xemacps_mcast_entry_mask &= (~temp_mask);
/* Update mac address in hash table */
xemacpsif_mac_hash_update(netif, ip_addr, action);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully removed.\r\n", __func__));
return ERR_OK;
}
if (i == XEMACPS_MAX_MAC_ADDR) {
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers present with\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("the requested Multicast MAC address.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address removal failure!!.\r\n"));
return ERR_MEM;
}
}
return ERR_OK;
}
#endif
/*
* xemacpsif_init():
*
* Should be called at the beginning of the program to set up the
* network interface. It calls the function low_level_init() to do the
* actual setup of the hardware.
*
*/
err_t xemacpsif_init(struct netif *netif)
{
#if LWIP_SNMP
/* ifType ethernetCsmacd(6) @see RFC1213 */
netif->link_type = 6;
	/* your link speed here, in bits per second (placeholder: 1000 Mbps;
	 * fill in the actual negotiated speed) */
	netif->link_speed = 1000000000;
netif->ts = 0;
netif->ifinoctets = 0;
netif->ifinucastpkts = 0;
netif->ifinnucastpkts = 0;
netif->ifindiscards = 0;
netif->ifoutoctets = 0;
netif->ifoutucastpkts = 0;
netif->ifoutnucastpkts = 0;
netif->ifoutdiscards = 0;
#endif
netif->name[0] = IFNAME0;
netif->name[1] = IFNAME1;
netif->output = xemacpsif_output;
netif->linkoutput = low_level_output;
#if LWIP_IPV6
netif->output_ip6 = ethip6_output;
#endif
low_level_init(netif);
return ERR_OK;
}
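/*
 * Example (illustrative sketch): bringing the GEM interface up under an
 * RTOS. tcpip_input() hands frames to the tcpip thread, and a dedicated
 * thread pumps received packets out of the driver queue via
 * xemacif_input_thread() (declared in netif/xadapter.h). GEM_BASEADDR, the
 * MAC address and the thread stack size/priority are placeholders.
 */
#if 0
static struct netif gem0;

void example_add_emacps_netif(void)
{
	static unsigned char mac[6] = { 0x00, 0x0a, 0x35, 0x00, 0x02, 0x03 };
	ip4_addr_t ipaddr, netmask, gw;

	IP4_ADDR(&ipaddr, 192, 168, 1, 20);
	IP4_ADDR(&netmask, 255, 255, 255, 0);
	IP4_ADDR(&gw, 192, 168, 1, 1);
	/* hwaddr must already hold the MAC address before init runs */
	memcpy(gem0.hwaddr, mac, 6);
	gem0.hwaddr_len = 6;
	netif_add(&gem0, &ipaddr, &netmask, &gw,
		  (void *)GEM_BASEADDR, xemacpsif_init, tcpip_input);
	netif_set_default(&gem0);
	netif_set_up(&gem0);
	sys_thread_new("xemacifd", (void (*)(void *))xemacif_input_thread,
		       &gem0, 1024, DEFAULT_THREAD_PRIO);
}
#endif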
/*
* xemacpsif_resetrx_on_no_rxdata():
*
* Should be called by the user at regular intervals, typically
* from a timer (100 msecond). This is to provide a SW workaround
* for the HW bug (SI #692601). Please refer to the function header
* for the function resetrx_on_no_rxdata in xemacpsif_dma.c to
* know more about the SI.
*
*/
void xemacpsif_resetrx_on_no_rxdata(struct netif *netif)
{
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
resetrx_on_no_rxdata(xemacpsif);
}
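/*
 * Example (illustrative sketch): one way to meet the 100 ms requirement
 * above with a FreeRTOS software timer. Note that low_level_init() already
 * creates such a timer on Cortex-A FreeRTOS builds (see vTimerCallback), so
 * this is only needed when that path is not compiled in. All names below
 * are hypothetical application-side names.
 */
#if 0
static void rxreset_timer_cb(TimerHandle_t t)
{
	struct netif *netif = (struct netif *)pvTimerGetTimerID(t);
	xemacpsif_resetrx_on_no_rxdata(netif);
}

void example_start_rxreset_timer(struct netif *netif)
{
	TimerHandle_t t = xTimerCreate("rxreset", pdMS_TO_TICKS(100),
				       pdTRUE, (void *)netif, rxreset_timer_cb);
	if (t != NULL) {
		xTimerStart(t, 0);
	}
}
#endif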


@ -0,0 +1,951 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#include "lwip/stats.h"
#include "lwip/sys.h"
#include "lwip/inet_chksum.h"
#include "netif/xadapter.h"
#include "netif/xemacpsif.h"
#include "xstatus.h"
#include "xlwipconfig.h"
#include "xparameters.h"
#include "xparameters_ps.h"
#include "xil_exception.h"
#include "xil_mmu.h"
#if defined (ARMR5)
#include "xreg_cortexr5.h"
#endif
#ifdef CONFIG_XTRACE
#include "xtrace.h"
#endif
#if !NO_SYS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#endif
#ifdef SDT
#include "xinterrupt_wrap.h"
#else
#define INTC_BASE_ADDR XPAR_SCUGIC_0_CPU_BASEADDR
#define INTC_DIST_BASE_ADDR XPAR_SCUGIC_0_DIST_BASEADDR
#endif
/* Byte alignment of BDs */
#define BD_ALIGNMENT (XEMACPS_DMABD_MINIMUM_ALIGNMENT*2)
/* A max of 4 different ethernet interfaces are supported */
static UINTPTR tx_pbufs_storage[4*XLWIP_CONFIG_N_TX_DESC];
static UINTPTR rx_pbufs_storage[4*XLWIP_CONFIG_N_RX_DESC];
static s32_t emac_intr_num;
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
volatile u32_t notifyinfo[4*XLWIP_CONFIG_N_TX_DESC];
#endif
/******************************************************************************
 * Each BD is 8 bytes in size and the BD chains need to be placed in uncached
 * memory. If they are not placed at uncached locations, the user needs to
 * flush or invalidate the cache for each BD/packet. However, a flush or
 * invalidate operates on whole cache lines, and a cache line can span
 * multiple BDs: flushing or invalidating one BD can therefore also
 * flush/invalidate BDs adjacent to the targeted one. Since both the user
 * and the hardware update BD fields, such an operation can overwrite updates
 * made by the hardware or by the user. To avoid this, it is always safe to
 * put the Rx and Tx BD chains at an uncached memory location.
 *
 * The Xilinx standalone BSP for Cortex-A9 implements only primary page
 * tables. Each table entry corresponds to 1 MB of the address map, so if a
 * memory region has to be made uncached, the minimum granularity is 1 MB.
 *
 * The implementation below allocates a u8 array of 1 MB, aligned to 1 MB
 * (2 MB on 64-bit targets). This ensures that the array is placed at a
 * 1 MB aligned address (e.g. 0x1200000) and occupies exactly that much
 * memory. The init_dma function then changes the attributes of this region
 * to make it uncached (strongly ordered). This increases the bss section of
 * the program significantly and can waste memory: the BDs hardly occupy
 * more than a few KB, and the rest of the 1 MB region is unused.
 *
 * If a program uses other peripherals that have DMAs/bus masters and need
 * uncached memory, they may end up following the same approach, which
 * aggravates the wastage. To avoid this, the user can create a dedicated
 * 1 MB section in the linker script, reserve it for use cases that need
 * uncached memory, and implement their own allocation logic for it in the
 * application. For such a case, changes need to be made in this file so
 * that uncached memory allocated through those other means is used instead.
 *
 * The present implementation allocates 1 MB of uncached memory and reserves
 * 64 KB for each BD chain. 64 KB holds 8192 BDs per chain, which is more
 * than enough for any application. Assuming that both emac0 and emac1 are
 * present, 256 KB of memory is allocated for BDs and the remaining 768 KB
 * is unused.
 *********************************************************************************/
#if defined __aarch64__
u8_t emac_bd_space[0x200000] __attribute__ ((aligned (0x200000)));
#else
u8_t emac_bd_space[0x100000] __attribute__ ((aligned (0x100000)));
#endif
static volatile u32_t bd_space_index = 0;
static volatile u32_t bd_space_attr_set = 0;
#if !NO_SYS
extern u32 xInsideISR;
#endif
#define XEMACPS_BD_TO_INDEX(ringptr, bdptr) \
(((UINTPTR)bdptr - (UINTPTR)(ringptr)->BaseBdAddr) / (ringptr)->Separation)
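/* A BD located n * Separation bytes past the ring's BaseBdAddr yields ring
 * index n; e.g. the third BD in the ring maps to index 2. */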
s32_t xemacps_is_tx_space_available(xemacpsif_s *emac)
{
XEmacPs_BdRing *txring;
s32_t freecnt = 0;
txring = &(XEmacPs_GetTxRing(&emac->emacps));
/* tx space is available as long as there are valid BD's */
freecnt = XEmacPs_BdRingGetFreeCnt(txring);
return freecnt;
}
static inline
u32_t get_base_index_txpbufsstorage (xemacpsif_s *xemacpsif)
{
	u32_t index = 0; /* fallback when no known base address matches */
#ifdef XPAR_XEMACPS_0_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
index = 0;
}
#endif
#ifdef XPAR_XEMACPS_1_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
index = XLWIP_CONFIG_N_TX_DESC;
}
#endif
#ifdef XPAR_XEMACPS_2_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
index = 2 * XLWIP_CONFIG_N_TX_DESC;
}
#endif
#ifdef XPAR_XEMACPS_3_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
index = 3 * XLWIP_CONFIG_N_TX_DESC;
}
#endif
return index;
}
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
static inline
u32_t get_base_index_tasknotifyinfo (xemacpsif_s *xemacpsif)
{
	u32_t index = 0; /* fallback when no known base address matches */
#ifdef XPAR_XEMACPS_0_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
index = 0;
}
#endif
#ifdef XPAR_XEMACPS_1_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
index = XLWIP_CONFIG_N_TX_DESC;
}
#endif
#ifdef XPAR_XEMACPS_2_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
index = 2 * XLWIP_CONFIG_N_TX_DESC;
}
#endif
#ifdef XPAR_XEMACPS_3_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
index = 3 * XLWIP_CONFIG_N_TX_DESC;
}
#endif
return index;
}
#endif
static inline
u32_t get_base_index_rxpbufsstorage (xemacpsif_s *xemacpsif)
{
	u32_t index = 0; /* fallback when no known base address matches */
#ifdef XPAR_XEMACPS_0_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
index = 0;
}
#endif
#ifdef XPAR_XEMACPS_1_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
index = XLWIP_CONFIG_N_RX_DESC;
}
#endif
#ifdef XPAR_XEMACPS_2_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
index = 2 * XLWIP_CONFIG_N_RX_DESC;
}
#endif
#ifdef XPAR_XEMACPS_3_BASEADDR
if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
index = 3 * XLWIP_CONFIG_N_RX_DESC;
}
#endif
return index;
}
void xemacps_process_sent_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *txring)
{
XEmacPs_Bd *txbdset;
XEmacPs_Bd *curbdpntr;
s32_t n_bds;
XStatus status;
s32_t n_pbufs_freed = 0;
u32_t bdindex;
struct pbuf *p;
u32 *temp;
u32_t index;
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
u32_t tx_task_notifier_index;
#endif
index = get_base_index_txpbufsstorage (xemacpsif);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
tx_task_notifier_index = get_base_index_tasknotifyinfo (xemacpsif);
#endif
while (1) {
/* obtain processed BD's */
n_bds = XEmacPs_BdRingFromHwTx(txring,
XLWIP_CONFIG_N_TX_DESC, &txbdset);
if (n_bds == 0) {
return;
}
/* free the processed BD's */
n_pbufs_freed = n_bds;
curbdpntr = txbdset;
while (n_pbufs_freed > 0) {
bdindex = XEMACPS_BD_TO_INDEX(txring, curbdpntr);
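			/* clear the BD address word, then rewrite the status word:
			 * bit 31 (USED) is set for every BD, and the last BD in the
			 * ring additionally keeps bit 30 (WRAP), hence 0xC0000000 */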
temp = (u32 *)curbdpntr;
*temp = 0;
temp++;
if (bdindex == (XLWIP_CONFIG_N_TX_DESC - 1)) {
*temp = 0xC0000000;
} else {
*temp = 0x80000000;
}
dsb();
p = (struct pbuf *)tx_pbufs_storage[index + bdindex];
if (p != NULL) {
pbuf_free(p);
}
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
notifyinfo[tx_task_notifier_index + bdindex] = 0;
#endif
tx_pbufs_storage[index + bdindex] = 0;
curbdpntr = XEmacPs_BdRingNext(txring, curbdpntr);
n_pbufs_freed--;
dsb();
}
status = XEmacPs_BdRingFree(txring, n_bds, txbdset);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Failure while freeing in Tx Done ISR\r\n"));
}
}
return;
}
void emacps_send_handler(void *arg)
{
struct xemac_s *xemac;
xemacpsif_s *xemacpsif;
XEmacPs_BdRing *txringptr;
u32_t regval;
#if !NO_SYS
xInsideISR++;
#endif
xemac = (struct xemac_s *)(arg);
xemacpsif = (xemacpsif_s *)(xemac->state);
txringptr = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
regval = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_TXSR_OFFSET);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,XEMACPS_TXSR_OFFSET, regval);
/* If Transmit done interrupt is asserted, process completed BD's */
xemacps_process_sent_bds(xemacpsif, txringptr);
#if !NO_SYS
xInsideISR--;
#endif
}
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p,
u32_t block_till_tx_complete, u32_t *to_block_index)
#else
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
#endif
{
struct pbuf *q;
s32_t n_pbufs;
XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
XEmacPs_Bd *temp_txbd;
XStatus status;
XEmacPs_BdRing *txring;
u32_t bdindex = 0;
u32_t index;
u32_t max_fr_size;
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
u32_t tx_task_notifier_index;
#endif
txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
index = get_base_index_txpbufsstorage (xemacpsif);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
tx_task_notifier_index = get_base_index_tasknotifyinfo (xemacpsif);
#endif
/* first count the number of pbufs */
for (q = p, n_pbufs = 0; q != NULL; q = q->next)
n_pbufs++;
/* obtain as many BD's */
status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
return XST_FAILURE;
}
for(q = p, txbd = txbdset; q != NULL; q = q->next) {
bdindex = XEMACPS_BD_TO_INDEX(txring, txbd);
if (tx_pbufs_storage[index + bdindex] != 0) {
LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
return XST_FAILURE;
}
/* Send the data from the pbuf to the interface, one pbuf at a
time. The size of the data in each pbuf is kept in the ->len
variable. */
if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
Xil_DCacheFlushRange((UINTPTR)q->payload, (UINTPTR)q->len);
}
XEmacPs_BdSetAddressTx(txbd, (UINTPTR)q->payload);
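		/* clamp the per-BD length; the 18 subtracted bytes account for
		 * the Ethernet header (14) plus FCS (4) */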
#ifdef ZYNQMP_USE_JUMBO
max_fr_size = MAX_FRAME_SIZE_JUMBO - 18;
#else
max_fr_size = XEMACPS_MAX_FRAME_SIZE - 18;
#endif
if (q->len > max_fr_size)
XEmacPs_BdSetLength(txbd, max_fr_size & 0x3FFF);
else
XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);
tx_pbufs_storage[index + bdindex] = (UINTPTR)q;
pbuf_ref(q);
last_txbd = txbd;
XEmacPs_BdClearLast(txbd);
txbd = XEmacPs_BdRingNext(txring, txbd);
}
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (block_till_tx_complete == 1) {
notifyinfo[tx_task_notifier_index + bdindex] = 1;
*to_block_index = tx_task_notifier_index + bdindex;
}
#endif
XEmacPs_BdSetLast(last_txbd);
	/* For fragmented packets, remember the 1st BD allocated for the 1st
	packet fragment. The used bit for this BD should be cleared at the end,
	after clearing out the used bits for the other fragments. For packets
	without fragments, just remember the single allocated BD. */
temp_txbd = txbdset;
txbd = txbdset;
txbd = XEmacPs_BdRingNext(txring, txbd);
q = p->next;
for(; q != NULL; q = q->next) {
XEmacPs_BdClearTxUsed(txbd);
txbd = XEmacPs_BdRingNext(txring, txbd);
}
XEmacPs_BdClearTxUsed(temp_txbd);
dsb();
status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
return XST_FAILURE;
}
/* Start transmit */
XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET,
(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));
return status;
}
void setup_rx_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *rxring)
{
XEmacPs_Bd *rxbd;
XStatus status;
struct pbuf *p;
u32_t freebds;
u32_t bdindex;
u32 *temp;
u32_t index;
index = get_base_index_rxpbufsstorage (xemacpsif);
freebds = XEmacPs_BdRingGetFreeCnt (rxring);
while (freebds > 0) {
freebds--;
#ifdef ZYNQMP_USE_JUMBO
p = pbuf_alloc(PBUF_RAW, MAX_FRAME_SIZE_JUMBO, PBUF_POOL);
#else
p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
#endif
if (!p) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
xil_printf("unable to alloc pbuf in recv_handler\r\n");
return;
}
status = XEmacPs_BdRingAlloc(rxring, 1, &rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
pbuf_free(p);
return;
}
status = XEmacPs_BdRingToHw(rxring, 1, rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
if (status == XST_DMA_SG_LIST_ERROR) {
LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XEmacPs_BdRingAlloc()\r\n"));
}
else {
LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));
}
pbuf_free(p);
XEmacPs_BdRingUnAlloc(rxring, 1, rxbd);
return;
}
#ifdef ZYNQMP_USE_JUMBO
if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)MAX_FRAME_SIZE_JUMBO);
}
#else
if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)XEMACPS_MAX_FRAME_SIZE);
}
#endif
bdindex = XEMACPS_BD_TO_INDEX(rxring, rxbd);
temp = (u32 *)rxbd;
temp++;
/* Status field should be cleared first to avoid drops */
*temp = 0;
dsb();
/* Set high address when required */
#ifdef __aarch64__
XEmacPs_BdWrite(rxbd, XEMACPS_BD_ADDR_HI_OFFSET,
(((UINTPTR)p->payload) & ULONG64_HI_MASK) >> 32U);
#endif
/* Set address field; add WRAP bit on last descriptor */
if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
XEmacPs_BdWrite(rxbd, XEMACPS_BD_ADDR_OFFSET, ((UINTPTR)p->payload | XEMACPS_RXBUF_WRAP_MASK));
} else {
XEmacPs_BdWrite(rxbd, XEMACPS_BD_ADDR_OFFSET, (UINTPTR)p->payload);
}
rx_pbufs_storage[index + bdindex] = (UINTPTR)p;
}
}
void emacps_recv_handler(void *arg)
{
struct pbuf *p;
XEmacPs_Bd *rxbdset, *curbdptr;
struct xemac_s *xemac;
xemacpsif_s *xemacpsif;
XEmacPs_BdRing *rxring;
volatile s32_t bd_processed;
s32_t rx_bytes, k;
u32_t bdindex;
u32_t regval;
u32_t index;
u32_t gigeversion;
xemac = (struct xemac_s *)(arg);
xemacpsif = (xemacpsif_s *)(xemac->state);
rxring = &XEmacPs_GetRxRing(&xemacpsif->emacps);
#if !NO_SYS
xInsideISR++;
#endif
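	/* the GEM module ID register at offset 0xFC carries the IP version
	 * in bits 27:16 */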
gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
index = get_base_index_rxpbufsstorage (xemacpsif);
/*
* If Reception done interrupt is asserted, call RX call back function
* to handle the processed BDs and then raise the according flag.
*/
regval = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET, regval);
if (gigeversion <= 2) {
resetrx_on_no_rxdata(xemacpsif);
}
while(1) {
bd_processed = XEmacPs_BdRingFromHwRx(rxring, XLWIP_CONFIG_N_RX_DESC, &rxbdset);
if (bd_processed <= 0) {
break;
}
for (k = 0, curbdptr=rxbdset; k < bd_processed; k++) {
bdindex = XEMACPS_BD_TO_INDEX(rxring, curbdptr);
p = (struct pbuf *)rx_pbufs_storage[index + bdindex];
/*
* Adjust the buffer size to the actual number of bytes received.
*/
#ifdef ZYNQMP_USE_JUMBO
rx_bytes = XEmacPs_GetRxFrameSize(&xemacpsif->emacps, curbdptr);
#else
rx_bytes = XEmacPs_BdGetLength(curbdptr);
#endif
pbuf_realloc(p, rx_bytes);
/* Invalidate RX frame before queuing to handle
* L1 cache prefetch conditions on any architecture.
*/
if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
Xil_DCacheInvalidateRange((UINTPTR)p->payload, rx_bytes);
}
/* store it in the receive queue,
* where it'll be processed by a different handler
*/
if (pq_enqueue(xemacpsif->recv_q, (void*)p) < 0) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
pbuf_free(p);
}
curbdptr = XEmacPs_BdRingNext( rxring, curbdptr);
}
/* free up the BD's */
XEmacPs_BdRingFree(rxring, bd_processed, rxbdset);
setup_rx_bds(xemacpsif, rxring);
}
#if !NO_SYS
sys_sem_signal(&xemac->sem_rx_data_available);
xInsideISR--;
#endif
return;
}
void clean_dma_txdescs(struct xemac_s *xemac)
{
XEmacPs_Bd bdtemplate;
XEmacPs_BdRing *txringptr;
xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
XEmacPs_BdClear(&bdtemplate);
XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);
/*
* Create the TxBD ring
*/
XEmacPs_BdRingCreate(txringptr, (UINTPTR) xemacpsif->tx_bdspace,
(UINTPTR) xemacpsif->tx_bdspace, BD_ALIGNMENT,
XLWIP_CONFIG_N_TX_DESC);
XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
}
XStatus init_dma(struct xemac_s *xemac)
{
XEmacPs_Bd bdtemplate;
XEmacPs_BdRing *rxringptr, *txringptr;
XEmacPs_Bd *rxbd;
struct pbuf *p;
XStatus status;
s32_t i;
u32_t bdindex;
volatile UINTPTR tempaddress;
u32_t index;
u32_t gigeversion;
XEmacPs_Bd *bdtxterminate = NULL;
XEmacPs_Bd *bdrxterminate = NULL;
u32 *temp;
xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
index = get_base_index_rxpbufsstorage (xemacpsif);
gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
/*
* The BDs need to be allocated in uncached memory. Hence the 1 MB
* address range allocated for Bd_Space is made uncached
* by setting appropriate attributes in the translation table.
* The Bd_Space is aligned to 1MB and has a size of 1 MB. This ensures
* a reserved uncached area used only for BDs.
*/
if (bd_space_attr_set == 0) {
#if defined (ARMR5)
Xil_SetTlbAttributes((s32_t)emac_bd_space, STRONG_ORDERD_SHARED | PRIV_RW_USER_RW); // addr, attr
#else
#if defined __aarch64__
Xil_SetTlbAttributes((u64)emac_bd_space, NORM_NONCACHE | INNER_SHAREABLE);
#else
Xil_SetTlbAttributes((s32_t)emac_bd_space, DEVICE_MEMORY); // addr, attr
#endif
#endif
bd_space_attr_set = 1;
}
rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));
/* Allocate 64k for Rx and Tx bds each to take care of extreme cases */
tempaddress = (UINTPTR)&(emac_bd_space[bd_space_index]);
xemacpsif->rx_bdspace = (void *)tempaddress;
bd_space_index += 0x10000;
tempaddress = (UINTPTR)&(emac_bd_space[bd_space_index]);
xemacpsif->tx_bdspace = (void *)tempaddress;
bd_space_index += 0x10000;
if (gigeversion > 2) {
tempaddress = (UINTPTR)&(emac_bd_space[bd_space_index]);
bdrxterminate = (XEmacPs_Bd *)tempaddress;
bd_space_index += 0x10000;
tempaddress = (UINTPTR)&(emac_bd_space[bd_space_index]);
bdtxterminate = (XEmacPs_Bd *)tempaddress;
bd_space_index += 0x10000;
}
LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: %p \r\n", xemacpsif->rx_bdspace));
LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: %p \r\n", xemacpsif->tx_bdspace));
if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
__FILE__, __LINE__);
return ERR_IF;
}
/*
* Setup RxBD space.
*
* Setup a BD template for the Rx channel. This template will be copied to
* every RxBD. We will not have to explicitly set these again.
*/
XEmacPs_BdClear(&bdtemplate);
/*
* Create the RxBD ring
*/
status = XEmacPs_BdRingCreate(rxringptr, (UINTPTR) xemacpsif->rx_bdspace,
(UINTPTR) xemacpsif->rx_bdspace, BD_ALIGNMENT,
XLWIP_CONFIG_N_RX_DESC);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
return ERR_IF;
}
status = XEmacPs_BdRingClone(rxringptr, &bdtemplate, XEMACPS_RECV);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
return ERR_IF;
}
XEmacPs_BdClear(&bdtemplate);
XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);
/*
* Create the TxBD ring
*/
status = XEmacPs_BdRingCreate(txringptr, (UINTPTR) xemacpsif->tx_bdspace,
(UINTPTR) xemacpsif->tx_bdspace, BD_ALIGNMENT,
XLWIP_CONFIG_N_TX_DESC);
if (status != XST_SUCCESS) {
return ERR_IF;
}
	/* clone the Tx BD template prepared above (cleared, with the USED bit set) into every TxBD */
status = XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
if (status != XST_SUCCESS) {
return ERR_IF;
}
/*
* Allocate RX descriptors, 1 RxBD at a time.
*/
for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
#ifdef ZYNQMP_USE_JUMBO
p = pbuf_alloc(PBUF_RAW, MAX_FRAME_SIZE_JUMBO, PBUF_POOL);
#else
p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
#endif
if (!p) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
xil_printf("unable to alloc pbuf in init_dma\r\n");
return ERR_IF;
}
status = XEmacPs_BdRingAlloc(rxringptr, 1, &rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
pbuf_free(p);
return ERR_IF;
}
/* Enqueue to HW */
status = XEmacPs_BdRingToHw(rxringptr, 1, rxbd);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
pbuf_free(p);
XEmacPs_BdRingUnAlloc(rxringptr, 1, rxbd);
return ERR_IF;
}
bdindex = XEMACPS_BD_TO_INDEX(rxringptr, rxbd);
temp = (u32 *)rxbd;
*temp = 0;
if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
*temp = 0x00000002;
}
temp++;
*temp = 0;
dsb();
#ifdef ZYNQMP_USE_JUMBO
if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)MAX_FRAME_SIZE_JUMBO);
}
#else
if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)XEMACPS_MAX_FRAME_SIZE);
}
#endif
XEmacPs_BdSetAddressRx(rxbd, (UINTPTR)p->payload);
rx_pbufs_storage[index + bdindex] = (UINTPTR)p;
}
XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.RxBdRing.BaseBdAddr, 0, XEMACPS_RECV);
if (gigeversion > 2) {
XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, 1, XEMACPS_SEND);
}else {
XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, 0, XEMACPS_SEND);
}
if (gigeversion > 2)
{
/*
		 * This version of GEM supports priority queuing; the current
		 * driver uses tx priority queue 1 and the normal rx queue for
		 * packet transmit and receive. The code below parks the other
		 * queue pointers in a known state to prevent the controller
		 * from malfunctioning by fetching descriptors from those
		 * queues.
*/
XEmacPs_BdClear(bdrxterminate);
XEmacPs_BdSetAddressRx(bdrxterminate, (XEMACPS_RXBUF_NEW_MASK |
XEMACPS_RXBUF_WRAP_MASK));
XEmacPs_Out32((xemacpsif->emacps.Config.BaseAddress + XEMACPS_RXQ1BASE_OFFSET),
(UINTPTR)bdrxterminate);
XEmacPs_BdClear(bdtxterminate);
XEmacPs_BdSetStatus(bdtxterminate, (XEMACPS_TXBUF_USED_MASK |
XEMACPS_TXBUF_WRAP_MASK));
XEmacPs_Out32((xemacpsif->emacps.Config.BaseAddress + XEMACPS_TXQBASE_OFFSET),
(UINTPTR)bdtxterminate);
}
#if !NO_SYS
#ifdef SDT
xPortInstallInterruptHandler(xemacpsif->emacps.Config.IntrId,
( Xil_InterruptHandler ) XEmacPs_IntrHandler,
(void *)&xemacpsif->emacps);
#else
xPortInstallInterruptHandler(xtopologyp->scugic_emac_intr,
( Xil_InterruptHandler ) XEmacPs_IntrHandler,
(void *)&xemacpsif->emacps);
#endif
#else
#ifndef SDT
/*
* Connect the device driver handler that will be called when an
* interrupt for the device occurs, the handler defined above performs
* the specific interrupt processing for the device.
*/
XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
(Xil_ExceptionHandler)XEmacPs_IntrHandler,
(void *)&xemacpsif->emacps);
#endif
#endif
/*
* Enable the interrupt for emacps.
*/
#ifdef SDT
XSetupInterruptSystem(&xemacpsif->emacps, &XEmacPs_IntrHandler,
xemacpsif->emacps.Config.IntrId, xemacpsif->emacps.Config.IntrParent,
XINTERRUPT_DEFAULT_PRIORITY);
#else
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
emac_intr_num = (u32) xtopologyp->scugic_emac_intr;
#endif
return 0;
}
/*
* resetrx_on_no_rxdata():
*
 * Called at regular intervals via the API xemacpsif_resetrx_on_no_rxdata,
 * which the user is expected to invoke.
 * The EmacPs has a HW bug (SI# 692601) on the Rx path under heavy Rx
 * traffic: there are times when the Rx path becomes unresponsive. The
 * workaround is to monitor the Rx path for traffic by reading the stats
 * registers regularly. If the stats register does not increment for some
 * time (indicating no Rx traffic), the function resets the Rx data path.
*
*/
void resetrx_on_no_rxdata(xemacpsif_s *xemacpsif)
{
u32_t regctrl;
u32_t tempcntr;
u32_t gigeversion;
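/* The GEM version is read from the module ID register at offset 0xFC;
 * the extracted field identifies the IP revision (2 on Zynq-7000,
 * higher on newer devices). */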
gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
if (gigeversion == 2) {
tempcntr = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXCNT_OFFSET);
if ((!tempcntr) && (!(xemacpsif->last_rx_frms_cntr))) {
regctrl = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET);
regctrl &= (~XEMACPS_NWCTRL_RXEN_MASK);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET, regctrl);
regctrl = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_NWCTRL_OFFSET);
regctrl |= (XEMACPS_NWCTRL_RXEN_MASK);
XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_NWCTRL_OFFSET, regctrl);
}
xemacpsif->last_rx_frms_cntr = tempcntr;
}
}
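/*
 * Usage sketch (illustrative, not part of the driver): poll the workaround
 * from a periodic task, e.g. every 100 ms under FreeRTOS, where
 * "xemacpsif_instance" is a hypothetical driver handle:
 *
 *   for (;;) {
 *       resetrx_on_no_rxdata(&xemacpsif_instance);
 *       vTaskDelay(pdMS_TO_TICKS(100));
 *   }
 */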
void free_txrx_pbufs(xemacpsif_s *xemacpsif)
{
s32_t index;
s32_t index1;
struct pbuf *p;
index1 = get_base_index_txpbufsstorage (xemacpsif);
for (index = index1; index < (index1 + XLWIP_CONFIG_N_TX_DESC); index++) {
if (tx_pbufs_storage[index] != 0) {
p = (struct pbuf *)tx_pbufs_storage[index];
pbuf_free(p);
tx_pbufs_storage[index] = 0;
}
}
index1 = get_base_index_rxpbufsstorage(xemacpsif);
for (index = index1; index < (index1 + XLWIP_CONFIG_N_RX_DESC); index++) {
if (rx_pbufs_storage[index] != 0) {
p = (struct pbuf *)rx_pbufs_storage[index];
pbuf_free(p);
rx_pbufs_storage[index] = 0;
}
}
}
void free_onlytx_pbufs(xemacpsif_s *xemacpsif)
{
s32_t index;
s32_t index1;
struct pbuf *p;
index1 = get_base_index_txpbufsstorage (xemacpsif);
for (index = index1; index < (index1 + XLWIP_CONFIG_N_TX_DESC); index++) {
if (tx_pbufs_storage[index] != 0) {
p = (struct pbuf *)tx_pbufs_storage[index];
pbuf_free(p);
tx_pbufs_storage[index] = 0;
}
}
}
/* reset Tx and Rx DMA pointers after XEmacPs_Stop */
void reset_dma(struct xemac_s *xemac)
{
u8 txqueuenum;
u32_t gigeversion;
xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
XEmacPs_BdRing *txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
XEmacPs_BdRing *rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
XEmacPs_BdRingPtrReset(txringptr, xemacpsif->tx_bdspace);
XEmacPs_BdRingPtrReset(rxringptr, xemacpsif->rx_bdspace);
gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
if (gigeversion > 2) {
txqueuenum = 1;
} else {
txqueuenum = 0;
}
XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.RxBdRing.BaseBdAddr, 0, XEMACPS_RECV);
XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, txqueuenum, XEMACPS_SEND);
}
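/*
 * Assumed recovery sequence around reset_dma() (a sketch, not prescribed
 * by this file): stop the controller, reset the rings, then restart:
 *
 *   XEmacPs_Stop(&xemacpsif->emacps);
 *   reset_dma(xemac);
 *   XEmacPs_Start(&xemacpsif->emacps);
 */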
#ifndef SDT
void emac_disable_intr(void)
{
XScuGic_DisableIntr(INTC_DIST_BASE_ADDR, emac_intr_num);
}
void emac_enable_intr(void)
{
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, emac_intr_num);
}
#endif

View File

@ -0,0 +1,273 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "netif/xemacpsif.h"
#include "lwipopts.h"
u32_t link_speed = 100;
extern XEmacPs_Config XEmacPs_ConfigTable[];
extern u32_t phymapemac0[32];
extern u32_t phymapemac1[32];
extern u32_t phyaddrforemac;
#if !NO_SYS
extern long xInsideISR;
#endif
XEmacPs_Config *xemacps_lookup_config(unsigned mac_base)
{
XEmacPs_Config *cfgptr = NULL;
s32_t i;
for (i = 0; i < XPAR_XEMACPS_NUM_INSTANCES; i++) {
if (XEmacPs_ConfigTable[i].BaseAddress == mac_base) {
cfgptr = &XEmacPs_ConfigTable[i];
break;
}
}
return (cfgptr);
}
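/*
 * Example (hypothetical base address): resolve the driver configuration
 * for a controller by its register base; returns NULL if the address is
 * not present in the config table:
 *
 *   XEmacPs_Config *cfg = xemacps_lookup_config(0xE000B000);
 */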
void init_emacps(xemacpsif_s *xemacps, struct netif *netif)
{
XEmacPs *xemacpsp;
s32_t status = XST_SUCCESS;
u32_t i;
u32_t phyfoundforemac0 = FALSE;
u32_t phyfoundforemac1 = FALSE;
xemacpsp = &xemacps->emacps;
#ifdef ZYNQMP_USE_JUMBO
XEmacPs_SetOptions(xemacpsp, XEMACPS_JUMBO_ENABLE_OPTION);
#endif
#if LWIP_IGMP
XEmacPs_SetOptions(xemacpsp, XEMACPS_MULTICAST_OPTION);
#endif
#ifdef SGMII_FIXED_LINK
XEmacPs_SetOptions(xemacpsp, XEMACPS_SGMII_ENABLE_OPTION);
status = XEmacPs_ReadReg(xemacpsp->Config.BaseAddress, XEMACPS_PCS_CONTROL_OFFSET);
status &= ~XEMACPS_PCS_CON_AUTO_NEG_MASK;
XEmacPs_WriteReg(xemacps->emacps.Config.BaseAddress, XEMACPS_PCS_CONTROL_OFFSET, status);
#endif
/* set mac address */
status = XEmacPs_SetMacAddress(xemacpsp, (void*)(netif->hwaddr), 1);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("In %s:Emac Mac Address set failed...\r\n",__func__));
}
XEmacPs_SetMdioDivisor(xemacpsp, MDC_DIV_224);
/* Please refer to the file header comments of xemacpsif_physpeed.c
* to learn more about the PHY programming sequence.
* For the PCS PMA core, phy_setup_emacps is called with the predefined PHY
* address exposed through xparameters.h.
* For the RGMII case, assuming multiple PHYs can be present on the MDIO bus,
* detect_phy is called to get the addresses of the PHYs present on
* a particular MDIO bus (emac0 or emac1). This address map is populated
* in phymapemac0 or phymapemac1.
* phy_setup_emacps is then called for each PHY present on the MDIO bus.
*/
#ifndef SGMII_FIXED_LINK
detect_phy(xemacpsp);
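/* Scan PHY addresses 31..1; address 0 is deliberately skipped here and
 * reserved as the broadcast fallback below. */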
for (i = 31; i > 0; i--) {
if (xemacpsp->Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
if (phymapemac0[i] == TRUE) {
link_speed = phy_setup_emacps(xemacpsp, i);
phyfoundforemac0 = TRUE;
phyaddrforemac = i;
}
} else {
if (phymapemac1[i] == TRUE) {
link_speed = phy_setup_emacps(xemacpsp, i);
phyfoundforemac1 = TRUE;
phyaddrforemac = i;
}
}
}
/* If no PHY was detected, use broadcast PHY address of 0 */
if (xemacpsp->Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
if (phyfoundforemac0 == FALSE)
link_speed = phy_setup_emacps(xemacpsp, 0);
} else {
if (phyfoundforemac1 == FALSE)
link_speed = phy_setup_emacps(xemacpsp, 0);
}
#else
link_speed = pcs_setup_emacps(xemacpsp);
#endif
if (link_speed == XST_FAILURE) {
xemacps->eth_link_status = ETH_LINK_DOWN;
xil_printf("Phy setup failure %s \n\r",__func__);
return;
} else {
xemacps->eth_link_status = ETH_LINK_UP;
}
XEmacPs_SetOperatingSpeed(xemacpsp, link_speed);
/* Setting the operating speed of the MAC needs a delay. */
{
volatile s32_t wait;
for (wait=0; wait < 20000; wait++);
}
}
void init_emacps_on_error (xemacpsif_s *xemacps, struct netif *netif)
{
XEmacPs *xemacpsp;
s32_t status = XST_SUCCESS;
xemacpsp = &xemacps->emacps;
/* set mac address */
status = XEmacPs_SetMacAddress(xemacpsp, (void*)(netif->hwaddr), 1);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("In %s:Emac Mac Address set failed...\r\n",__func__));
}
XEmacPs_SetOperatingSpeed(xemacpsp, link_speed);
/* Setting the operating speed of the MAC needs a delay. */
{
volatile s32_t wait;
for (wait=0; wait < 20000; wait++);
}
}
void setup_isr (struct xemac_s *xemac)
{
xemacpsif_s *xemacpsif;
xemacpsif = (xemacpsif_s *)(xemac->state);
/*
* Setup callbacks
*/
XEmacPs_SetHandler(&xemacpsif->emacps, XEMACPS_HANDLER_DMASEND,
(void *) emacps_send_handler,
(void *) xemac);
XEmacPs_SetHandler(&xemacpsif->emacps, XEMACPS_HANDLER_DMARECV,
(void *) emacps_recv_handler,
(void *) xemac);
XEmacPs_SetHandler(&xemacpsif->emacps, XEMACPS_HANDLER_ERROR,
(void *) emacps_error_handler,
(void *) xemac);
}
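/*
 * The registered callbacks are assumed to have the prototypes below (the
 * error handler is defined later in this file):
 *
 *   void emacps_send_handler(void *arg);
 *   void emacps_recv_handler(void *arg);
 *   void emacps_error_handler(void *arg, u8 Direction, u32 ErrorWord);
 */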
void start_emacps (xemacpsif_s *xemacps)
{
/* start the temac */
XEmacPs_Start(&xemacps->emacps);
}
void restart_emacps_transmitter (xemacpsif_s *xemacps)
{
u32_t Reg;
Reg = XEmacPs_ReadReg(xemacps->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET);
Reg = Reg & (~XEMACPS_NWCTRL_TXEN_MASK);
XEmacPs_WriteReg(xemacps->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET, Reg);
Reg = XEmacPs_ReadReg(xemacps->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET);
Reg = Reg | (XEMACPS_NWCTRL_TXEN_MASK);
XEmacPs_WriteReg(xemacps->emacps.Config.BaseAddress,
XEMACPS_NWCTRL_OFFSET, Reg);
}
void emacps_error_handler(void *arg, u8 Direction, u32 ErrorWord)
{
struct xemac_s *xemac;
xemacpsif_s *xemacpsif;
XEmacPs_BdRing *rxring;
XEmacPs_BdRing *txring;
#if !NO_SYS
xInsideISR++;
#endif
xemac = (struct xemac_s *)(arg);
xemacpsif = (xemacpsif_s *)(xemac->state);
rxring = &XEmacPs_GetRxRing(&xemacpsif->emacps);
txring = &XEmacPs_GetTxRing(&xemacpsif->emacps);
if (ErrorWord != 0) {
switch (Direction) {
case XEMACPS_RECV:
if (ErrorWord & XEMACPS_RXSR_HRESPNOK_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Receive DMA error\r\n"));
HandleEmacPsError(xemac);
}
if (ErrorWord & XEMACPS_RXSR_RXOVR_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Receive over run\r\n"));
emacps_recv_handler(arg);
setup_rx_bds(xemacpsif, rxring);
}
if (ErrorWord & XEMACPS_RXSR_BUFFNA_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Receive buffer not available\r\n"));
emacps_recv_handler(arg);
setup_rx_bds(xemacpsif, rxring);
}
break;
case XEMACPS_SEND:
if (ErrorWord & XEMACPS_TXSR_HRESPNOK_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Transmit DMA error\r\n"));
HandleEmacPsError(xemac);
}
if (ErrorWord & XEMACPS_TXSR_URUN_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Transmit under run\r\n"));
HandleTxErrors(xemac);
}
if (ErrorWord & XEMACPS_TXSR_BUFEXH_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Transmit buffer exhausted\r\n"));
HandleTxErrors(xemac);
}
if (ErrorWord & XEMACPS_TXSR_RXOVR_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Transmit retry excessed limits\r\n"));
HandleTxErrors(xemac);
}
if (ErrorWord & XEMACPS_TXSR_FRAMERX_MASK) {
LWIP_DEBUGF(NETIF_DEBUG, ("Transmit collision\r\n"));
xemacps_process_sent_bds(xemacpsif, txring);
}
break;
}
}
#if !NO_SYS
xInsideISR--;
#endif
}

View File

@ -0,0 +1,50 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XEMACPSIF_HW_H_
#define __XEMACPSIF_HW_H_
#include "netif/xemacpsif.h"
#include "lwip/netif.h"
#ifdef __cplusplus
extern "C" {
#endif
XEmacPs_Config * xemacps_lookup_config(unsigned mac_base);
void init_emacps(xemacpsif_s *xemacpsif, struct netif *netif);
#ifdef __cplusplus
}
#endif
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,93 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include <stdlib.h>
#include "netif/xpqueue.h"
#define NUM_QUEUES 2
pq_queue_t pq_queue[NUM_QUEUES];
pq_queue_t *
pq_create_queue()
{
static int i;
pq_queue_t *q = NULL;
if (i >= NUM_QUEUES) {
LWIP_DEBUGF(NETIF_DEBUG, ("ERR: Max Queues allocated\n\r"));
return q;
}
q = &pq_queue[i++];
if (!q)
return q;
q->head = q->tail = q->len = 0;
return q;
}
int
pq_enqueue(pq_queue_t *q, void *p)
{
if (q->len == PQ_QUEUE_SIZE)
return -1;
q->data[q->head] = p;
q->head = (q->head + 1)%PQ_QUEUE_SIZE;
q->len++;
return 0;
}
void*
pq_dequeue(pq_queue_t *q)
{
int ptail;
if (q->len == 0)
return NULL;
ptail = q->tail;
q->tail = (q->tail + 1)%PQ_QUEUE_SIZE;
q->len--;
return q->data[ptail];
}
int
pq_qlength(pq_queue_t *q)
{
return q->len;
}
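/*
 * Usage sketch (illustrative only): the fixed pool provides at most
 * NUM_QUEUES queues, e.g. one for deferred RX pbufs:
 *
 *   pq_queue_t *rxq = pq_create_queue();
 *   if ((rxq == NULL) || (pq_enqueue(rxq, (void *)p) < 0)) {
 *       pbuf_free(p);                       // pool exhausted or queue full
 *   }
 *   struct pbuf *next = (struct pbuf *)pq_dequeue(rxq);  // NULL when empty
 */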

View File

@ -56,6 +56,8 @@ void mission(void);
void initFreeRTOSHelper();
int testEth();
int main(void) {
/* Configure the hardware ready to run the demo. */
@ -63,6 +65,8 @@ int main(void) {
// printf("Booting Software\n");
testEth();
mission();
}

View File

@ -1,3 +1,4 @@
add_subdirectory(libsrc/emacps)
add_subdirectory(libsrc/gpiops)
add_subdirectory(libsrc/scugic)
add_subdirectory(libsrc/scutimer)

View File

@ -7,7 +7,7 @@
/* Definition for CPU ID */
#define XPAR_CPU_ID 0U
#define USE_AMP 0U
//#define USE_AMP 0U // TODO: setting this to 0 breaks xil_cache
/* Definitions for peripheral PS7_CORTEXA9_0 */
#define XPAR_PS7_CORTEXA9_0_CPU_CLK_FREQ_HZ 666666687

View File

@ -0,0 +1,9 @@
target_sources(${TARGET_NAME} PUBLIC
src/xemacps_bdring.c
src/xemacps.c
src/xemacps_control.c
src/xemacps_g.c
src/xemacps_hw.c
src/xemacps_intr.c
src/xemacps_sinit.c
)

bsp_z7/testEth.c Normal file (212 lines)
View File

@ -0,0 +1,212 @@
/*
* Copyright (C) 2017 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2023 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "lwip/inet.h"
#include "lwip/init.h"
#include "lwip/udp.h"
#include "lwip/timeouts.h"
#include "netif/xadapter.h"
#include "xil_printf.h"
#include "xparameters.h"
#include <sleep.h>
#include <FreeRTOS.h>
#include <task.h>
#define PLATFORM_EMAC_BASEADDR 0xE000B000
#if LWIP_DHCP == 1
#include "lwip/dhcp.h"
extern volatile int dhcp_timoutcntr;
err_t dhcp_start(struct netif *netif);
#endif
#ifdef XPS_BOARD_ZCU102
#if defined(XPAR_XIICPS_0_DEVICE_ID) || defined(XPAR_XIICPS_0_BASEADDR)
int IicPhyReset(void);
#endif
#endif
void print_app_header();
void start_application();
#define THREAD_STACKSIZE 1024
#define DEFAULT_IP_ADDRESS "192.168.1.10"
#define DEFAULT_IP_MASK "255.255.255.0"
#define DEFAULT_GW_ADDRESS "192.168.1.1"
struct netif server_netif;
u32_t sys_now(void) { return xTaskGetTickCount() * portTICK_PERIOD_MS; }
static void print_ip(char *msg, ip_addr_t *ip) {
xil_printf("%s", msg);
xil_printf("%d.%d.%d.%d\n\r", ip4_addr1(ip), ip4_addr2(ip), ip4_addr3(ip),
ip4_addr4(ip));
}
static void print_ip_settings(ip_addr_t *ip, ip_addr_t *mask, ip_addr_t *gw) {
print_ip("Board IP: ", ip);
print_ip("Netmask : ", mask);
print_ip("Gateway : ", gw);
}
static void assign_default_ip(ip_addr_t *ip, ip_addr_t *mask, ip_addr_t *gw) {
int err;
xil_printf("Configuring default IP %s \r\n", DEFAULT_IP_ADDRESS);
err = inet_aton(DEFAULT_IP_ADDRESS, ip);
if (!err)
xil_printf("Invalid default IP address: %d\r\n", err);
err = inet_aton(DEFAULT_IP_MASK, mask);
if (!err)
xil_printf("Invalid default IP MASK: %d\r\n", err);
err = inet_aton(DEFAULT_GW_ADDRESS, gw);
if (!err)
xil_printf("Invalid default gateway address: %d\r\n", err);
}
void network_thread() {
#if LWIP_DHCP == 1
int mscnt = 0;
#endif
/* the mac address of the board. this should be unique per board */
u8_t mac_ethernet_address[] = {0x00, 0x0a, 0x35, 0x00, 0x01, 0x02};
/* Add network interface to the netif_list, and set it as default */
if (!xemac_add(&server_netif, NULL, NULL, NULL, mac_ethernet_address,
PLATFORM_EMAC_BASEADDR)) {
xil_printf("Error adding N/W interface\r\n");
return;
}
netif_set_default(&server_netif);
/* specify that the network if is up */
netif_set_up(&server_netif);
// /* start packet receive thread - required for lwIP operation */
// sys_thread_new("xemacif_input_thread",
// (void(*)(void*))xemacif_input_thread, &server_netif,
// THREAD_STACKSIZE, DEFAULT_THREAD_PRIO);
#if LWIP_DHCP == 1
dhcp_start(&server_netif);
while (1) {
vTaskDelay(DHCP_FINE_TIMER_MSECS / portTICK_PERIOD_MS);
dhcp_fine_tmr();
mscnt += DHCP_FINE_TIMER_MSECS;
if (mscnt >= DHCP_COARSE_TIMER_SECS * 1000) {
dhcp_coarse_tmr();
mscnt = 0;
}
}
#else
// vTaskDelete(NULL);
#endif
}
int testEth() {
#if LWIP_DHCP == 1
int mscnt = 0;
#endif
#ifdef XPS_BOARD_ZCU102
IicPhyReset();
#endif
xil_printf("\n\r\n\r");
xil_printf("-----lwIP Socket Mode UDP Client Application------\r\n");
/* initialize lwIP before calling sys_thread_new */
lwip_init();
/* any thread using lwIP should be created using sys_thread_new;
 * here network_thread() is simply called inline (raw/polling mode) */
network_thread();
#if LWIP_DHCP == 1
while (1) {
vTaskDelay(DHCP_FINE_TIMER_MSECS / portTICK_PERIOD_MS);
if (server_netif.ip_addr.addr) {
xil_printf("DHCP request success\r\n");
break;
}
mscnt += DHCP_FINE_TIMER_MSECS;
if (mscnt >= 10000) {
xil_printf("ERROR: DHCP request timed out\r\n");
assign_default_ip(&(server_netif.ip_addr), &(server_netif.netmask),
&(server_netif.gw));
break;
}
}
#else
assign_default_ip(&(server_netif.ip_addr), &(server_netif.netmask),
&(server_netif.gw));
#endif
print_ip_settings(&(server_netif.ip_addr), &(server_netif.netmask),
&(server_netif.gw));
xil_printf("\r\n");
/* print all application headers */
// print_app_header();
xil_printf("\r\n");
uint8_t data[] = {'1','2','3','4','5'};
struct pbuf* tx = pbuf_alloc_reference(data, sizeof(data), PBUF_REF);
ip_addr_t addr = IPADDR4_INIT_BYTES(192,168,1,5);
struct udp_pcb *udpecho_raw_pcb = udp_new();
xil_printf("pcb: %p\r\n", udpecho_raw_pcb);
if ((tx != NULL) && (udpecho_raw_pcb != NULL)) {
udp_sendto(udpecho_raw_pcb, tx, &addr, 1177);
}
while (1) {
// slipif_rxbyte_input() is private, so we would use slipif_poll() and
// implement sio_tryread(); sio_tryread() does a blocking read with a
// timeout, so timeouts still get checked even when no data is incoming.
//slipif_poll(&netif);
sys_check_timeouts();
}
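/* not reached: the loop above never exits, so the pbuf is never freed */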
pbuf_free(tx);
/* start the application*/
// start_application();
return 0;
}
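/*
 * Sketch for the still-missing receive path (assumption: raw-API polling,
 * no dedicated RX thread): feed incoming frames into lwIP via the adapter
 * layer's xemacif_input() inside the main loop, e.g.
 *
 *   while (1) {
 *       xemacif_input(&server_netif);
 *       sys_check_timeouts();
 *   }
 */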