refactoring, added second binary for sim interface, added second lwip implementation for sim interface

This commit is contained in:
Ulrich Mohr 2024-09-30 17:38:37 +02:00
parent 8fa5cddc23
commit 944e45cad2
69 changed files with 537 additions and 4936 deletions

View File

@ -17,8 +17,6 @@ set(FreeRTOS_PATH FreeRTOS-Kernel/)
set(MISSION_PATH mission/)
set (LWIP_DIR contrib/lwip)
# ##############################################################################
# Configuration
@ -44,26 +42,8 @@ endif()
add_executable(${TARGET_NAME})
# lwip
set (LWIP_INCLUDE_DIRS
"${LWIP_DIR}/src/include"
"${LWIP_DIR}/src/include/compat/posix"
"bsp_z7/lwip/include"
#${LWIP_DIR}/contrib/ports/freertos/include
)
include(${LWIP_DIR}/src/Filelists.cmake)
set(lwip_SRCS
${lwipcore_SRCS}
${lwipcore4_SRCS}
${lwipcore6_SRCS}
${lwipnetif_SRCS}
${lwipapi_SRCS}
#${LWIP_DIR}/contrib/ports/freertos/sys_arch.c
#${LWIP_DIR}/src/netif/slipif.c
#${LWIP_DIR}/src/apps/tftp/tftp.c
)
if(${CMAKE_CROSSCOMPILING})
add_library(lwip ${lwip_SRCS})
target_include_directories(lwip PUBLIC ${LWIP_INCLUDE_DIRS})
set (LWIP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/contrib/lwip)
endif()
# Add freeRTOS
@ -79,8 +59,7 @@ if(${CMAKE_CROSSCOMPILING})
INTERFACE
projCOVERAGE_TEST=0)
target_include_directories(
freertos_config INTERFACE bsp_z7/ps7_cortexa9_0/include)
freertos_config INTERFACE bsp_z7/ps7_cortexa9_0/include)
if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL armv7a-none-eabihf)
# our compiler options, will trickle down through the project
target_compile_options(freertos_config INTERFACE -c -fmessage-length=0 -g -O0 -mcpu=cortex-a9 -mfpu=vfpv3 -mfloat-abi=hard -ffunction-sections -fdata-sections)
@ -109,17 +88,20 @@ add_subdirectory(common)
add_subdirectory(${MISSION_PATH})
add_subdirectory(mission_rust)
if(${CMAKE_CROSSCOMPILING})
add_subdirectory(sim_interface)
endif()
# ##############################################################################
# Post-Sources preparation
# ##############################################################################
# Add libraries for all sources.
if(${CMAKE_CROSSCOMPILING})
target_link_libraries(lwip PUBLIC freertos_kernel)
target_link_libraries(${TARGET_NAME} PUBLIC lwip)
endif()
target_link_libraries(${TARGET_NAME} PUBLIC freertos_kernel mission_rust)
target_link_libraries(${TARGET_NAME} PUBLIC bsp freertos_kernel lwip mission_rust)
# target_include_directories(
# ${TARGET_NAME} PUBLIC ${BSP_PATH})

View File

@ -20,6 +20,16 @@ If you have one around, load bitstream at startup (go get a coffee, takes time w
openocd -f board/digilent_zedboard.cfg -c "init" -c "pld load 0 system.bit"
```
If multiple zedboards are connected, openocd takes parameters to select which zedboard to use, as well as to configure the gdb port and disable telnet and tcl:
```sh
openocd -f board/digilent_zedboard.cfg -c "adapter serial #####; gdb_port 3334; telnet_port disabled; tcl_port disabled"
```
The adapter serial can be found by running `lsusb -vvv`, looking for the FTDI FT232H device with `iManufacturer: Digilent`. The serial is reported as `iSerial`.
```sh
lsusb -vvv | grep Digilent -A1 -B2
```
3. To use JTAG Boot for the OBSW, you first need to run the FSBL once.
On build PC (adapt IP if different from debugging PC) in the folder where you build the FSBL / in the fsbl-compiled repository/submodule folder:
@ -53,4 +63,4 @@ arm-none-eabi-gdb romeo-obsw -iex "target extended-remote localhost:3333" -ex "s
_Consider removing the `-ex "cont"` to start the software manually when you have set up your serial monitor._
UART USB port should output something at `115200baud`, (I use moserial to monitor).
UART USB port should output something at `115200baud`, (I use moserial to monitor). If using our project's `ps7_init.c` USB UART1 is configured to `230400baud`, UART0 to `1000000baud`.

View File

@ -1,9 +1,14 @@
add_library(bsp)
add_subdirectory(freeRTOS)
add_subdirectory(ps7_cortexa9_0)
add_subdirectory(lwip)
add_subdirectory(newlib)
add_subdirectory(hardware)
add_subdirectory(eth)
target_sources(${TARGET_NAME} PRIVATE main.c testEth.c)
target_sources(bsp PRIVATE main.c)
target_link_libraries(bsp PUBLIC freertos_kernel)
#target_sources(${TARGET_NAME} PRIVATE testEth.c)

View File

@ -1,5 +0,0 @@
# Xilinx ethernet wrapper: add the driver source to the main target and
# export this directory's headers to all consumers of the target.
target_sources(${TARGET_NAME} PRIVATE xethernet.c)
target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)

View File

@ -1 +1 @@
target_sources(${TARGET_NAME} PRIVATE FreeRTOS_asm_vectors.S FreeRTOS_tick_config.c)
target_sources(bsp PUBLIC FreeRTOS_asm_vectors.S FreeRTOS_tick_config.c)

View File

@ -1 +1 @@
target_sources(${TARGET_NAME} PRIVATE interfaces.c uart.c)
target_sources(bsp PRIVATE interfaces.c uart.c)

View File

@ -1,2 +1,10 @@
add_subdirectory(netif)
add_subdirectory(port)
include(${LWIP_DIR}/src/Filelists.cmake)
add_subdirectory(xilinx_eth)
add_subdirectory(bare)
target_link_libraries(lwip_xil PUBLIC freertos_kernel)
# should not need freeRTOS as it is NO_SYS
#target_link_libraries(lwip PUBLIC freertos_kernel)

8
bsp_z7/lwip/README.md Normal file
View File

@ -0,0 +1,8 @@
We provide two lwip configurations, bare and xilinx_eth. If the cmake option `ROMEO_Z7_USE_XIL_ETH` is selected, the xilinx_eth variant is used, otherwise the bare variant is used.
Bare is the bare-metal minimal implementation. It tries to reduce code size and system complexity, using the bare API and a custom networking thread, handling all networking aspects.
This configuration is meant to be run on the satellite obsw.
Xilinx_eth supports the Xilinx ethernet core of the Zynq PS, including DMA. Unfortunately, the Xilinx drivers require a custom sys_arch port.
The xilinx_eth configuration is meant to be used with POSIX-compatible calls, using the lwIP sockets API and multiple threads running in the background.
This configuration is meant to be run on debugging configurations or support equipment.

View File

@ -0,0 +1,19 @@
# Bare (NO_SYS) lwIP configuration: builds the `lwip` library from the
# upstream Filelists.cmake source variables (core, IPv4, IPv6, netif and
# sequential API), which the parent must have populated before adding this
# directory. The local "include" dir supplies lwipopts.h for this variant.
set(LWIP_INCLUDE_DIRS
  "${LWIP_DIR}/src/include"
  "${LWIP_DIR}/src/include/compat/posix"
  "include"
)
set(lwip_SRCS
  ${lwipcore_SRCS}
  ${lwipcore4_SRCS}
  ${lwipcore6_SRCS}
  ${lwipnetif_SRCS}
  ${lwipapi_SRCS}
)
add_library(lwip ${lwip_SRCS})
target_include_directories(lwip PUBLIC ${LWIP_INCLUDE_DIRS})

View File

@ -0,0 +1,336 @@
/*
* Copyright (c) 2001-2003 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#ifndef LWIP_LWIPOPTS_H
#define LWIP_LWIPOPTS_H
#define LWIP_IPV4 1
#define LWIP_IPV6 0
#define NO_SYS 1
#define LWIP_SOCKET 0
#define LWIP_NETCONN 0
#define LWIP_NETIF_API 0
#define LWIP_IGMP 0
#define LWIP_ICMP LWIP_IPV4
#define LWIP_SNMP 0
//#define MIB2_STATS LWIP_SNMP
#ifdef LWIP_HAVE_MBEDTLS
#define LWIP_SNMP_V3 (LWIP_SNMP)
#endif
#define LWIP_DNS 0
#define LWIP_MDNS_RESPONDER 0
#define LWIP_NUM_NETIF_CLIENT_DATA (LWIP_MDNS_RESPONDER)
#define LWIP_HAVE_LOOPIF 0
#define LWIP_NETIF_LOOPBACK 0
#define LWIP_LOOPBACK_MAX_PBUFS 0
#define TCP_LISTEN_BACKLOG 0
#define LWIP_COMPAT_SOCKETS 0
//TODO put this into the OS
#define LWIP_POSIX_SOCKETS_IO_NAMES 0
#define LWIP_SO_RCVTIMEO 0
#define LWIP_SO_RCVBUF 0
//TODO use this with LWIP_POSIX_SOCKETS_IO_NAMES
// TODO why is 100 not working...
#define LWIP_SOCKET_OFFSET 10
#define LWIP_SOCKET_SELECT 0
#define LWIP_SOCKET_POLL 0
#define LWIP_TCPIP_CORE_LOCKING 0
#define LWIP_NETIF_LINK_CALLBACK 1
#define LWIP_NETIF_STATUS_CALLBACK 1
#define LWIP_NETIF_EXT_STATUS_CALLBACK 1
//#define LWIP_DEBUG
#ifdef LWIP_DEBUG
#define LWIP_DBG_MIN_LEVEL LWIP_DBG_LEVEL_ALL
#define PPP_DEBUG LWIP_DBG_OFF
#define MEM_DEBUG LWIP_DBG_OFF
#define MEMP_DEBUG LWIP_DBG_OFF
#define PBUF_DEBUG LWIP_DBG_ON
#define API_LIB_DEBUG LWIP_DBG_ON
#define API_MSG_DEBUG LWIP_DBG_ON
#define TCPIP_DEBUG LWIP_DBG_ON
#define NETIF_DEBUG LWIP_DBG_ON
#define SOCKETS_DEBUG LWIP_DBG_ON
#define DNS_DEBUG LWIP_DBG_ON
#define AUTOIP_DEBUG LWIP_DBG_ON
#define DHCP_DEBUG LWIP_DBG_ON
#define IP_DEBUG LWIP_DBG_ON
#define IP_REASS_DEBUG LWIP_DBG_ON
#define ICMP_DEBUG LWIP_DBG_ON
#define IGMP_DEBUG LWIP_DBG_ON
#define UDP_DEBUG LWIP_DBG_ON
#define TCP_DEBUG LWIP_DBG_OFF
#define TCP_INPUT_DEBUG LWIP_DBG_OFF
#define TCP_OUTPUT_DEBUG LWIP_DBG_OFF
#define TCP_RTO_DEBUG LWIP_DBG_OFF
#define TCP_CWND_DEBUG LWIP_DBG_OFF
#define TCP_WND_DEBUG LWIP_DBG_OFF
#define TCP_FR_DEBUG LWIP_DBG_OFF
#define TCP_QLEN_DEBUG LWIP_DBG_OFF
#define TCP_RST_DEBUG LWIP_DBG_OFF
#define TIMERS_DEBUG LWIP_DBG_OFF
#define LWIP_DEBUG_TIMERNAMES LWIP_DBG_OFF
#endif
#define LWIP_DBG_TYPES_ON (LWIP_DBG_ON|LWIP_DBG_TRACE|LWIP_DBG_STATE|LWIP_DBG_FRESH|LWIP_DBG_HALT)
/* ---------- Memory options ---------- */
/* MEM_ALIGNMENT: should be set to the alignment of the CPU for which
lwIP is compiled. 4 byte alignment -> define MEM_ALIGNMENT to 4, 2
byte alignment -> define MEM_ALIGNMENT to 2. */
/* MSVC port: intel processors don't need 4-byte alignment,
but are faster that way! */
//TODO documentation
//Zynq needs 32 for DMA to work (something about cache I guess...)
#define MEM_ALIGNMENT 32U
#define MEM_USE_POOLS 1
#define MEMP_USE_CUSTOM_POOLS MEM_USE_POOLS
/* MEM_SIZE: the size of the heap memory. If the application will send
a lot of data that needs to be copied, this should be set high. */
#define MEM_SIZE 102400
/* MEMP_NUM_PBUF: the number of memp struct pbufs. If the application
sends a lot of data out of ROM (or other static memory), this
should be set high. */
#define MEMP_NUM_PBUF 16
/* MEMP_NUM_RAW_PCB: the number of UDP protocol control blocks. One
per active RAW "connection". */
#define MEMP_NUM_RAW_PCB 3
/* MEMP_NUM_UDP_PCB: the number of UDP protocol control blocks. One
per active UDP "connection". */
#define MEMP_NUM_UDP_PCB 8
/* MEMP_NUM_TCP_PCB: the number of simulatenously active TCP
connections. */
#define MEMP_NUM_TCP_PCB 5
/* MEMP_NUM_TCP_PCB_LISTEN: the number of listening TCP
connections. */
#define MEMP_NUM_TCP_PCB_LISTEN 8
/* MEMP_NUM_TCP_SEG: the number of simultaneously queued TCP
segments. */
#define MEMP_NUM_TCP_SEG 16
/* MEMP_NUM_SYS_TIMEOUT: the number of simulateously active
timeouts. */
#define MEMP_NUM_SYS_TIMEOUT 17
/* The following four are used only with the sequential API and can be
set to 0 if the application only will use the raw API. */
/* MEMP_NUM_NETBUF: the number of struct netbufs. */
#define MEMP_NUM_NETBUF 2
/* MEMP_NUM_NETCONN: the number of struct netconns. */
#define MEMP_NUM_NETCONN 12
/* MEMP_NUM_TCPIP_MSG_*: the number of struct tcpip_msg, which is used
for sequential API communication and incoming packets. Used in
src/api/tcpip.c. */
#define MEMP_NUM_TCPIP_MSG_API 16
#define MEMP_NUM_TCPIP_MSG_INPKT 16
#define TCPIP_THREAD_STACKSIZE 10240
/* ---------- Pbuf options ---------- */
/* PBUF_POOL_SIZE: the number of buffers in the pbuf pool. */
#define PBUF_POOL_SIZE 120
/* PBUF_POOL_BUFSIZE: the size of each pbuf in the pbuf pool. */
/* Needs to be large enough to fit eth MTU to be able to use DMA for xemacps rx*/
#define PBUF_POOL_BUFSIZE 1600
/** SYS_LIGHTWEIGHT_PROT
* define SYS_LIGHTWEIGHT_PROT in lwipopts.h if you want inter-task protection
* for certain critical regions during buffer allocation, deallocation and memory
* allocation and deallocation.
*/
#define SYS_LIGHTWEIGHT_PROT 0
/* ---------- TCP options ---------- */
#define LWIP_TCP 0
#define TCP_TTL 255
#define LWIP_ALTCP (LWIP_TCP)
#ifdef LWIP_HAVE_MBEDTLS
#define LWIP_ALTCP_TLS (LWIP_TCP)
#define LWIP_ALTCP_TLS_MBEDTLS (LWIP_TCP)
#endif
/* Controls if TCP should queue segments that arrive out of
order. Define to 0 if your device is low on memory. */
#define TCP_QUEUE_OOSEQ 1
/* TCP Maximum segment size. */
#define TCP_MSS 1024
/* TCP sender buffer space (bytes). */
#define TCP_SND_BUF 2048
/* TCP sender buffer space (pbufs). This must be at least = 2 *
TCP_SND_BUF/TCP_MSS for things to work. */
#define TCP_SND_QUEUELEN (4 * TCP_SND_BUF/TCP_MSS)
/* TCP writable space (bytes). This must be less than or equal
to TCP_SND_BUF. It is the amount of space which must be
available in the tcp snd_buf for select to return writable */
#define TCP_SNDLOWAT (TCP_SND_BUF/2)
/* TCP receive window. */
#define TCP_WND (20 * 1024)
/* Maximum number of retransmissions of data segments. */
#define TCP_MAXRTX 12
/* Maximum number of retransmissions of SYN segments. */
#define TCP_SYNMAXRTX 4
#define TCPIP_MBOX_SIZE 5
/* ---------- ARP options ---------- */
#define LWIP_ARP 1
#define ARP_TABLE_SIZE 10
#define ARP_QUEUEING 1
#define ARP_QUEUE_LEN 10
/* ---------- IP options ---------- */
/* Define IP_FORWARD to 1 if you wish to have the ability to forward
IP packets across network interfaces. If you are going to run lwIP
on a device with only one network interface, define this to 0. */
#define IP_FORWARD 1
/* IP reassembly and segmentation.These are orthogonal even
* if they both deal with IP fragments */
#define IP_REASSEMBLY 1
#define IP_REASS_MAX_PBUFS (10 * ((1500 + PBUF_POOL_BUFSIZE - 1) / PBUF_POOL_BUFSIZE))
#define MEMP_NUM_REASSDATA IP_REASS_MAX_PBUFS
#define IP_FRAG 1
#define IPV6_FRAG_COPYHEADER 1
/* ---------- ICMP options ---------- */
#define ICMP_TTL 255
/* ---------- DHCP options ---------- */
/* Define LWIP_DHCP to 1 if you want DHCP configuration of
interfaces. */
#define LWIP_DHCP 0
/* 1 if you want to do an ARP check on the offered address
(recommended). */
#define DHCP_DOES_ARP_CHECK (LWIP_DHCP)
/* ---------- AUTOIP options ------- */
#define LWIP_AUTOIP (LWIP_DHCP)
#define LWIP_DHCP_AUTOIP_COOP (LWIP_DHCP && LWIP_AUTOIP)
/* ---------- UDP options ---------- */
#define LWIP_UDP 1
#define LWIP_UDPLITE 0
#define UDP_TTL 255
#define DEFAULT_UDP_RECVMBOX_SIZE 4
/* ---------- RAW options ---------- */
#define LWIP_RAW 0
#define DEFAULT_TCP_RECVMBOX_SIZE 2
/* ---------- Statistics options ---------- */
#define LWIP_STATS 0
#define LWIP_STATS_DISPLAY 0
#if LWIP_STATS
#define LINK_STATS 1
#define IP_STATS 1
#define ICMP_STATS 1
#define IGMP_STATS 1
#define IPFRAG_STATS 1
#define UDP_STATS 1
#define TCP_STATS 1
#define MEM_STATS 1
#define MEMP_STATS 1
#define PBUF_STATS 1
#define SYS_STATS 1
#endif /* LWIP_STATS */
/* ---------- NETBIOS options ---------- */
#define LWIP_NETBIOS_RESPOND_NAME_QUERY 0
/* ---------- PPP options ---------- */
#define PPP_SUPPORT 0 /* Set > 0 for PPP */
#if PPP_SUPPORT
#define NUM_PPP 1 /* Max PPP sessions. */
/* Select modules to enable. Ideally these would be set in the makefile but
* we're limited by the command line length so you need to modify the settings
* in this file.
*/
#define PPPOE_SUPPORT 1
#define PPPOS_SUPPORT 1
#define PAP_SUPPORT 1 /* Set > 0 for PAP. */
#define CHAP_SUPPORT 1 /* Set > 0 for CHAP. */
#define MSCHAP_SUPPORT 0 /* Set > 0 for MSCHAP */
#define CBCP_SUPPORT 0 /* Set > 0 for CBCP (NOT FUNCTIONAL!) */
#define CCP_SUPPORT 0 /* Set > 0 for CCP */
#define VJ_SUPPORT 0 /* Set > 0 for VJ header compression. */
#define MD5_SUPPORT 1 /* Set > 0 for MD5 (see also CHAP) */
#endif /* PPP_SUPPORT */
// Disable slip task
#define SLIP_USE_RX_THREAD 0
#endif /* LWIP_LWIPOPTS_H */

View File

@ -1,752 +0,0 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include <stdio.h>
#include <string.h>
#include <xparameters.h>
#include "xlwipconfig.h"
#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/igmp.h"
#include "netif/etharp.h"
#include "netif/xaxiemacif.h"
#include "netif/xadapter.h"
#include "netif/xpqueue.h"
#include "xaxiemacif_fifo.h"
#include "xaxiemacif_hw.h"
#include "xparameters.h"
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#else
#include "xintc.h"
#endif
#else
#include "xinterrupt_wrap.h"
#endif
#if LWIP_IPV6
#include "lwip/ethip6.h"
#endif
/* Define those to better describe your network interface. */
#define IFNAME0 't'
#define IFNAME1 'e'
#if LWIP_IGMP
static err_t xaxiemacif_mac_filter_update (struct netif *netif,
ip_addr_t *group, u8_t action);
static u8_t xaxiemac_mcast_entry_mask = 0;
#endif
#if LWIP_IPV6 && LWIP_IPV6_MLD
#define XAXIEMAC_MAX_MAC_ADDR 4
static err_t xaxiemacif_mld6_mac_filter_update (struct netif *netif,
ip_addr_t *group, u8_t action);
static u8_t xaxiemac_mld6_mcast_entry_mask;
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
extern volatile u32_t notifyinfo[XLWIP_CONFIG_N_TX_DESC];
#endif
/*
 * this function is always called with interrupts off
 * this function also assumes that there are available BD's
 *
 * Hands one pbuf to the MAC via whichever transfer engine this instance
 * is configured with: AXI DMA scatter-gather, MCDMA (deprecated) or the
 * locallink FIFO. Returns ERR_OK on success, ERR_MEM when the underlying
 * send reported failure.
 */
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
static err_t _unbuffered_low_level_output(xaxiemacif_s *xaxiemacif,
struct pbuf *p, u32_t block_till_tx_complete, u32_t *to_block_index )
#else
static err_t _unbuffered_low_level_output(xaxiemacif_s *xaxiemacif,
struct pbuf *p)
#endif
{
XStatus status = 0;
err_t err = ERR_MEM;
#if ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
/* dispatch on the transfer mechanism the MAC hardware provides */
if (XAxiEthernet_IsDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (block_till_tx_complete == 1) {
status = axidma_sgsend(xaxiemacif, p, 1, to_block_index);
} else {
status = axidma_sgsend(xaxiemacif, p, 0, to_block_index);
}
#else
status = axidma_sgsend(xaxiemacif, p);
#endif
#endif
} else if (XAxiEthernet_IsMcDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA
xil_printf("lwip support with mcdma is deprecated\n");
status = axi_mcdma_sgsend(xaxiemacif, p);
#endif
} else {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
status = axififo_send(xaxiemacif, p);
#endif
}
if (status != XST_SUCCESS) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
} else {
err = ERR_OK;
}
#if ETH_PAD_SIZE
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
#if LINK_STATS
/* NOTE(review): xmit is incremented even on a failed send — confirm intended */
lwip_stats.link.xmit++;
#endif /* LINK_STATS */
return err;
}
/*
 * low_level_output():
 *
 * Should do the actual transmission of the packet. The packet is
 * contained in the pbuf that is passed to the function. This pbuf
 * might be chained.
 *
 * Retries up to 100 times while TX descriptor space is exhausted,
 * reclaiming sent BDs between attempts; drops the packet when no space
 * frees up. Runs the descriptor handling under SYS_ARCH_PROTECT.
 */
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
err_t err = ERR_MEM;
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
u32_t notfifyblocksleepcntr;
u32_t to_block_index;
#endif
SYS_ARCH_DECL_PROTECT(lev);
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
/*
 * With AXI Ethernet on Zynq, we observed unexplained delays for
 * BD Status update. As a result, we are hitting a condition where
 * there are no BDs free to transmit packets. So, we have added
 * this logic where we look for the status update in a definite
 * loop.
 */
XAxiDma_BdRing *txring = XAxiDma_GetTxRing(&xaxiemacif->axidma);
#endif
int count = 100;
SYS_ARCH_PROTECT(lev);
while (count) {
/* check if space is available to send */
if (xaxiemac_is_tx_space_available(xaxiemacif)) {
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
err = _unbuffered_low_level_output(xaxiemacif, p, 1, &to_block_index);
break;
} else {
err = _unbuffered_low_level_output(xaxiemacif, p, 0, &to_block_index);
break;
}
#else
err = _unbuffered_low_level_output(xaxiemacif, p);
break;
#endif
} else {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
/* reclaim completed BDs so a retry may find free TX space */
process_sent_bds(txring);
#endif
count--;
}
}
if (count == 0) {
xil_printf("pack dropped, no space\r\n");
SYS_ARCH_UNPROTECT(lev);
goto return_pack_dropped;
}
SYS_ARCH_UNPROTECT(lev);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
/* Wait for approx 1 second before timing out */
notfifyblocksleepcntr = 900000;
while(notifyinfo[to_block_index] == 1) {
usleep(1);
notfifyblocksleepcntr--;
/* NOTE(review): notfifyblocksleepcntr is unsigned, so "<= 0" only
 * catches exactly 0 after the decrement — works, but "== 0" would
 * state the intent more clearly; confirm before changing. */
if (notfifyblocksleepcntr <= 0) {
err = ERR_TIMEOUT;
break;
}
}
}
netif_clear_opt_block_tx(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET);
#endif
return_pack_dropped:
return err;
}
/*
* low_level_input():
*
* Should allocate a pbuf and transfer the bytes of the incoming
* packet from the interface into the pbuf.
*
*/
static struct pbuf *low_level_input(struct netif *netif)
{
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
struct pbuf *p;
/* see if there is data to process */
if (pq_qlength(xaxiemacif->recv_q) == 0)
return NULL;
/* return one packet from receive q */
p = (struct pbuf *)pq_dequeue(xaxiemacif->recv_q);
return p;
}
/*
 * xaxiemacif_output():
 *
 * netif output hook called by the TCP/IP stack when an IP packet should
 * be sent. Delegates address resolution and transmission (or queueing)
 * to lwIP's etharp layer, which in turn calls low_level_output().
 */
static err_t xaxiemacif_output(struct netif *netif, struct pbuf *p,
const ip_addr_t *ipaddr)
{
return etharp_output(netif, p, ipaddr);
}
/*
* xaxiemacif_input():
*
* This function should be called when a packet is ready to be read
* from the interface. It uses the function low_level_input() that
* should handle the actual reception of bytes from the network
* interface.
*
* Returns the number of packets read (max 1 packet on success,
* 0 if there are no packets)
*
* In NO_SYS builds one packet is processed per call; otherwise the
* body loops until the receive queue is drained.
*/
int xaxiemacif_input(struct netif *netif)
{
struct eth_hdr *ethhdr;
struct pbuf *p;
SYS_ARCH_DECL_PROTECT(lev);
#if !NO_SYS
while (1)
#endif
{
/* move received packet into a new pbuf */
SYS_ARCH_PROTECT(lev);
p = low_level_input(netif);
SYS_ARCH_UNPROTECT(lev);
/* no packet could be read, silently ignore this */
if (p == NULL)
return 0;
/* points to packet payload, which starts with an Ethernet header */
ethhdr = p->payload;
#if LINK_STATS
lwip_stats.link.recv++;
#endif /* LINK_STATS */
/* route by EtherType; unknown frame types are freed and dropped */
switch (htons(ethhdr->type)) {
/* IP or ARP packet? */
case ETHTYPE_IP:
case ETHTYPE_ARP:
#if LWIP_IPV6
/*IPv6 Packet?*/
case ETHTYPE_IPV6:
#endif
#if PPPOE_SUPPORT
/* PPPoE packet? */
case ETHTYPE_PPPOEDISC:
case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
/* full packet send to tcpip_thread to process */
if (netif->input(p, netif) != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_input: IP input error\r\n"));
pbuf_free(p);
p = NULL;
}
break;
default:
pbuf_free(p);
p = NULL;
break;
}
}
return 1;
}
static err_t low_level_init(struct netif *netif)
{
unsigned mac_address = (unsigned)(UINTPTR)(netif->state);
struct xemac_s *xemac;
xaxiemacif_s *xaxiemacif;
XAxiEthernet_Config *mac_config;
xaxiemacif = mem_malloc(sizeof *xaxiemacif);
if (xaxiemacif == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac = mem_malloc(sizeof *xemac);
if (xemac == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac->state = (void *)xaxiemacif;
xemac->topology_index = xtopology_find_index(mac_address);
xemac->type = xemac_type_axi_ethernet;
xaxiemacif->send_q = NULL;
xaxiemacif->recv_q = pq_create_queue();
if (!xaxiemacif->recv_q)
return ERR_MEM;
/* maximum transfer unit */
#ifdef USE_JUMBO_FRAMES
netif->mtu = XAE_JUMBO_MTU - XAE_HDR_SIZE;
#else
netif->mtu = XAE_MTU - XAE_HDR_SIZE;
#endif
#if LWIP_IGMP
netif->igmp_mac_filter = xaxiemacif_mac_filter_update;
#endif
#if LWIP_IPV6 && LWIP_IPV6_MLD
netif->mld_mac_filter = xaxiemacif_mld6_mac_filter_update;
#endif
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP |
NETIF_FLAG_LINK_UP;
#if LWIP_IPV6 && LWIP_IPV6_MLD
netif->flags |= NETIF_FLAG_MLD6;
#endif
#if LWIP_IGMP
netif->flags |= NETIF_FLAG_IGMP;
#endif
#if !NO_SYS
sys_sem_new(&xemac->sem_rx_data_available, 0);
#endif
/* obtain config of this emac */
mac_config = xaxiemac_lookup_config((unsigned)(UINTPTR)netif->state);
XAxiEthernet_Initialize(&xaxiemacif->axi_ethernet, mac_config,
mac_config->BaseAddress);
#ifdef XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT
enable_sgmii_clock(&xaxiemacif->axi_ethernet);
#endif
/* figure out if the system has DMA */
if (XAxiEthernet_IsDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_DMA
/* initialize the DMA engine */
init_axi_dma(xemac);
#endif
} else if (XAxiEthernet_IsFifo(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_FIFO
/* initialize the locallink FIFOs */
init_axi_fifo(xemac);
#endif
} else if (XAxiEthernet_IsMcDma(&xaxiemacif->axi_ethernet)) {
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET_MCDMA
/* Initialize MCDMA engine */
init_axi_mcdma(xemac);
#endif
} else {
/* should not occur */
LWIP_DEBUGF(NETIF_DEBUG, ("xaxiemacif_init: mac is not configured with DMA, MCDMA or FIFO\r\n"));
return ERR_IF;
}
/* initialize the mac */
init_axiemac(xaxiemacif, netif);
/* replace the state in netif (currently the emac baseaddress)
* with the mac instance pointer.
*/
netif->state = (void *)xemac;
return ERR_OK;
}
#if LWIP_IPV6 && LWIP_IPV6_MLD
/*
 * Return TRUE when the given IPv6 address is multicast of any recognized
 * scope (interface-, link-, admin-, site-, org-local or global), FALSE
 * otherwise.
 */
static u8_t xaxiemacif_ip6_addr_ismulticast(ip6_addr_t* ip_addr)
{
u8_t is_mcast = ip6_addr_ismulticast_linklocal(ip_addr) ||
    ip6_addr_ismulticast_iflocal(ip_addr) ||
    ip6_addr_ismulticast_adminlocal(ip_addr) ||
    ip6_addr_ismulticast_sitelocal(ip_addr) ||
    ip6_addr_ismulticast_orglocal(ip_addr) ||
    ip6_addr_ismulticast_global(ip_addr);
return is_mcast ? TRUE : FALSE;
}
/*
 * Add or remove one IPv6 multicast MAC entry in the MAC's filter table.
 * The MAC address is derived from the low 4 bytes of the IPv6 group
 * address plus the fixed 33:33 multicast prefix. The MAC is stopped
 * around the table update and restarted afterwards.
 */
static void xaxiemacif_mld6_mac_hash_update (struct netif *netif, u8_t *ip_addr,
u8_t action,u8_t entry)
{
u8_t multicast_mac_addr[6];
u8_t multicast_mac_addr_to_clr[6];
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
if (action == NETIF_ADD_MAC_FILTER) {
/* Set multicast mac address in hash table */
multicast_mac_addr[0] = LL_IP6_MULTICAST_ADDR_0;
multicast_mac_addr[1] = LL_IP6_MULTICAST_ADDR_1;
multicast_mac_addr[2] = ip_addr[12];
multicast_mac_addr[3] = ip_addr[13];
multicast_mac_addr[4] = ip_addr[14];
multicast_mac_addr[5] = ip_addr[15];
XAxiEthernet_Stop(&xaxiemacif->axi_ethernet);
XAxiEthernet_MulticastAdd(&xaxiemacif->axi_ethernet,multicast_mac_addr, entry);
XAxiEthernet_Start(&xaxiemacif->axi_ethernet);
} else if (action == NETIF_DEL_MAC_FILTER) {
/* Remove multicast mac address from hash table */
XAxiEthernet_MulticastGet(&xaxiemacif->axi_ethernet,multicast_mac_addr_to_clr, entry);
XAxiEthernet_Stop(&xaxiemacif->axi_ethernet);
XAxiEthernet_MulticastClear(&xaxiemacif->axi_ethernet, entry);
XAxiEthernet_Start(&xaxiemacif->axi_ethernet);
}
}
/*
 * netif MLD6 filter hook: allocate or free one of the
 * XAXIEMAC_MAX_MAC_ADDR hardware multicast filter slots for the given
 * IPv6 group. Slot occupancy is tracked in the
 * xaxiemac_mld6_mcast_entry_mask bitmask.
 *
 * Returns ERR_OK on success, ERR_ARG for a non-multicast group, ERR_MEM
 * when no filter slot is free (add) or the address is not registered (del).
 */
static err_t
xaxiemacif_mld6_mac_filter_update (struct netif *netif, ip_addr_t *group,
u8_t action)
{
u8_t temp_mask;
unsigned int i;
u8_t entry;
u8_t * ip_addr = (u8_t *) group;
if(!(xaxiemacif_ip6_addr_ismulticast((ip6_addr_t*) ip_addr))) {
LWIP_DEBUGF(NETIF_DEBUG,
("%s: The requested MAC address is not a multicast address.\r\n", __func__)); LWIP_DEBUGF(NETIF_DEBUG,
("Multicast address add operation failure !!\r\n"));
return ERR_ARG;
}
if (action == NETIF_ADD_MAC_FILTER) {
/* claim the first free filter slot */
for (i = 0; i < XAXIEMAC_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xaxiemac_mld6_mcast_entry_mask & temp_mask) == temp_mask) {
continue;
}
entry = i;
xaxiemac_mld6_mcast_entry_mask |= temp_mask;
/* Update mac address in hash table */
xaxiemacif_mld6_mac_hash_update(netif, ip_addr, action,entry);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully added.\r\n", __func__));
return ERR_OK;
}
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers left.\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address add operation failure !!\r\n"));
return ERR_MEM;
} else if (action == NETIF_DEL_MAC_FILTER) {
/* NOTE(review): this clears the first occupied slot, not necessarily
 * the slot holding THIS group address — confirm against caller usage */
for (i = 0; i < XAXIEMAC_MAX_MAC_ADDR; i++) {
temp_mask = (0x01) << i;
if ((xaxiemac_mld6_mcast_entry_mask & temp_mask) == temp_mask) {
entry = i;
xaxiemacif_mld6_mac_hash_update(netif, ip_addr,action, entry);
xaxiemac_mld6_mcast_entry_mask &= (~temp_mask);
LWIP_DEBUGF(NETIF_DEBUG,
("%s: Multicast MAC address successfully removed.\r\n", __func__));
return ERR_OK;
} else {
continue;
}
}
LWIP_DEBUGF(NETIF_DEBUG,
("%s: No multicast address registers present with\r\n", __func__));
LWIP_DEBUGF(NETIF_DEBUG,
("the requested Multicast MAC address.\r\n"));
LWIP_DEBUGF(NETIF_DEBUG,
("Multicast MAC address removal failure!!.\r\n"));
return ERR_MEM;
}
return ERR_ARG;
}
#endif
#if LWIP_IGMP
/*
 * Add or remove an IPv4 multicast group in the AXI Ethernet multicast
 * address registers (4 hardware slots, tracked by bits 0-3 of
 * xaxiemac_mcast_entry_mask).
 *
 * @param netif  lwIP network interface; netif->state carries the xemac_s
 *               wrapper whose state is the xaxiemacif_s instance.
 * @param group  IPv4 multicast group address (first octet 224..239).
 * @param action IGMP_ADD_MAC_FILTER or IGMP_DEL_MAC_FILTER.
 * @return ERR_OK on success, ERR_MEM when no slot is free (add) or no
 *         matching slot exists (delete), ERR_ARG for a non-multicast
 *         address.
 */
static err_t
xaxiemacif_mac_filter_update (struct netif *netif, ip_addr_t *group,
		u8_t action)
{
	err_t return_val = ERR_OK;
	u8_t multicast_mac_addr[6];
	u8_t multicast_mac_addr_to_clr[6];
	u8_t temp_mask;
	int entry;
	int i;
	u8_t * ip_addr_temp = (u8_t *)group;
	struct xemac_s *xemac = (struct xemac_s *)(netif->state);
	xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
	if (action == IGMP_ADD_MAC_FILTER) {
		/* 224.x.x.x .. 239.x.x.x is the IPv4 multicast range. */
		if ((ip_addr_temp[0] >= 224) && (ip_addr_temp[0] <= 239)) {
			if (xaxiemac_mcast_entry_mask >= 0x0F) {
				/* All 4 hardware slots already taken. */
				LWIP_DEBUGF(NETIF_DEBUG,
					("xaxiemacif_mac_filter_update: No multicast address registers left.\r\n"));
				LWIP_DEBUGF(NETIF_DEBUG,
					(" Multicast MAC address add operation failure !!\r\n"));
				return_val = ERR_MEM;
			} else {
				for (i = 0; i < 4; i++) {
					temp_mask = (0x01) << i;
					if ((xaxiemac_mcast_entry_mask &
						temp_mask) == temp_mask) {
						continue;
					} else {
						/* Free slot found: build the
						 * 01:00:5E-prefixed multicast MAC
						 * from the low 23 bits of the IP
						 * (RFC 1112 mapping) and program it.
						 */
						entry = i;
						xaxiemac_mcast_entry_mask
							|= temp_mask;
						multicast_mac_addr[0] = 0x01;
						multicast_mac_addr[1] = 0x00;
						multicast_mac_addr[2] = 0x5E;
						multicast_mac_addr[3] =
							ip_addr_temp[1] & 0x7F;
						multicast_mac_addr[4] =
							ip_addr_temp[2];
						multicast_mac_addr[5] =
							ip_addr_temp[3];
						/* Core must be stopped while the
						 * multicast table is modified. */
						XAxiEthernet_Stop
							(&xaxiemacif->axi_ethernet);
						XAxiEthernet_MulticastAdd
							(&xaxiemacif->axi_ethernet,
							multicast_mac_addr,entry);
						XAxiEthernet_Start
							(&xaxiemacif->axi_ethernet);
						LWIP_DEBUGF(NETIF_DEBUG,
							("xaxiemacif_mac_filter_update: Multicast MAC address successfully added.\r\n"));
						return_val = ERR_OK;
						break;
					}
				}
				if (i == 4) {
					LWIP_DEBUGF(NETIF_DEBUG,
						("xaxiemacif_mac_filter_update: No multicast address registers left.\r\n"));
					LWIP_DEBUGF(NETIF_DEBUG,
						(" Multicast MAC address add operation failure !!\r\n"));
					return_val = ERR_MEM;
				}
			}
		} else {
			LWIP_DEBUGF(NETIF_DEBUG,
				("xaxiemacif_mac_filter_update: The requested MAC address is not a multicast address.\r\n"));
			LWIP_DEBUGF(NETIF_DEBUG,
				(" Multicast address add operation failure !!\r\n"));
			return_val = ERR_ARG;
		}
	} else if (action == IGMP_DEL_MAC_FILTER) {
		if ((ip_addr_temp[0] < 224) || (ip_addr_temp[0] > 239)) {
			LWIP_DEBUGF(NETIF_DEBUG,
				("xaxiemacif_mac_filter_update: The requested MAC address is not a multicast address.\r\n"));
			LWIP_DEBUGF(NETIF_DEBUG,
				(" Multicast address add operation failure !!\r\n"));
			return_val = ERR_ARG;
		} else {
			/* Scan occupied slots for one whose stored MAC matches the
			 * RFC 1112 mapping of this group, then clear it. */
			for (i = 0; i < 4; i++) {
				temp_mask = (0x01) << i;
				if ((xaxiemac_mcast_entry_mask & temp_mask)
						== temp_mask) {
					XAxiEthernet_MulticastGet
						(&xaxiemacif->axi_ethernet,
						multicast_mac_addr_to_clr, i);
					if ((ip_addr_temp[3] ==
						multicast_mac_addr_to_clr[5]) &&
						(ip_addr_temp[2] ==
						multicast_mac_addr_to_clr[4]) &&
						((ip_addr_temp[1] & 0x7f) ==
						multicast_mac_addr_to_clr[3])) {
						XAxiEthernet_Stop
							(&xaxiemacif->axi_ethernet);
						XAxiEthernet_MulticastClear
							(&xaxiemacif->axi_ethernet, i);
						XAxiEthernet_Start
							(&xaxiemacif->axi_ethernet);
						LWIP_DEBUGF(NETIF_DEBUG,
							("xaxiemacif_mac_filter_update: Multicast MAC address successfully removed.\r\n"));
						return_val = ERR_OK;
						xaxiemac_mcast_entry_mask &=
							(~temp_mask);
						break;
					} else {
						continue;
					}
				} else {
					continue;
				}
			}
			if (i == 4) {
				LWIP_DEBUGF(NETIF_DEBUG,
					("xaxiemacif_mac_filter_update: No multicast address registers present with\r\n"));
				LWIP_DEBUGF(NETIF_DEBUG,
					(" the requested Multicast MAC address.\r\n"));
				LWIP_DEBUGF(NETIF_DEBUG,
					(" Multicast MAC address removal failure!!.\r\n"));
				return_val = ERR_MEM;
			}
		}
	}
	return return_val;
}
#endif
/*
* xaxiemacif_init():
*
* Should be called at the beginning of the program to set up the
* network interface. It calls the function low_level_init() to do the
* actual setup of the hardware.
*
*/
/*
 * Set up the lwIP netif for the AXI Ethernet adapter: fills in name,
 * output hooks and (optionally) SNMP MIB-2 counters, then performs the
 * hardware bring-up via low_level_init().
 *
 * @param netif  lwIP network interface to initialize.
 * @return ERR_OK always (low_level_init's result is not propagated).
 */
err_t
xaxiemacif_init(struct netif *netif)
{
#if LWIP_SNMP
	/* ifType ethernetCsmacd(6) @see RFC1213 */
	netif->link_type = 6;
	/* Fix: the original template line read "netif->link_speed = ;",
	 * which fails to compile whenever LWIP_SNMP is enabled. Report the
	 * AXI Ethernet maximum of 1 Gbps in bits/second, as the MIB-2
	 * ifSpeed object expects. */
	netif->link_speed = 1000000000;
	/* Zero all MIB-2 interface counters at start-of-day. */
	netif->ts = 0;
	netif->ifinoctets = 0;
	netif->ifinucastpkts = 0;
	netif->ifinnucastpkts = 0;
	netif->ifindiscards = 0;
	netif->ifoutoctets = 0;
	netif->ifoutucastpkts = 0;
	netif->ifoutnucastpkts = 0;
	netif->ifoutdiscards = 0;
#endif
	netif->name[0] = IFNAME0;
	netif->name[1] = IFNAME1;
	/* IPv4 output goes through the ARP-resolving wrapper; raw frames
	 * through the DMA transmit path. */
	netif->output = xaxiemacif_output;
	netif->linkoutput = low_level_output;
#if LWIP_IPV6
	netif->output_ip6 = ethip6_output;
#endif
	low_level_init(netif);
	return ERR_OK;
}

View File

@ -1,994 +0,0 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#include "lwip/sys.h"
#endif
#include "lwip/stats.h"
#include "lwip/inet_chksum.h"
#include "netif/xadapter.h"
#include "netif/xaxiemacif.h"
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#else
#include "xintc_l.h"
#endif
#else
#include "xinterrupt_wrap.h"
#ifndef XCACHE_FLUSH_DCACHE_RANGE
#define XCACHE_FLUSH_DCACHE_RANGE(data, length) \
Xil_DCacheFlushRange((UINTPTR)data, length)
#endif
#ifndef XCACHE_INVALIDATE_DCACHE_RANGE
#define XCACHE_INVALIDATE_DCACHE_RANGE(data, length) \
Xil_DCacheInvalidateRange((u32)data, length)
#endif
#endif
#include "xstatus.h"
#include "xlwipconfig.h"
#include "xparameters.h"
#ifdef CONFIG_XTRACE
#include "xtrace.h"
#endif
#if defined __aarch64__ || defined (__arm__)
#include "xil_mmu.h"
#elif defined (ARMR5)
#include "xreg_cortexr5.h"
#endif
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#ifndef XCACHE_FLUSH_DCACHE_RANGE
#define XCACHE_FLUSH_DCACHE_RANGE(data, length) \
Xil_DCacheFlushRange((UINTPTR)data, length)
#endif
#ifndef XCACHE_INVALIDATE_DCACHE_RANGE
#define XCACHE_INVALIDATE_DCACHE_RANGE(data, length) \
Xil_DCacheInvalidateRange((u32)data, length)
#endif
#endif
/* Byte alignment of BDs */
#define BD_ALIGNMENT (XAXIDMA_BD_MINIMUM_ALIGNMENT*2)
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Function Prototypes *********************************************/
/*
* Function prototypes of the functions used for registering Fast
* Interrupt Handlers
*/
static void axidma_sendfast_handler(void) __attribute__ ((fast_interrupt));
static void axidma_recvfast_handler(void) __attribute__ ((fast_interrupt));
static void xaxiemac_errorfast_handler(void) __attribute__ ((fast_interrupt));
/**************** Variable Declarations **************************************/
/** Variables for Fast Interrupt handlers ***/
struct xemac_s *xemac_fast;
xaxiemacif_s *xaxiemacif_fast;
#endif
#if !NO_SYS
extern u32 xInsideISR;
#endif
#define BD_SIZE_2MB 0x200000
#define BD_SIZE_1MB 0x100000
#define BD_SIZE_64KB 0x10000
#if defined (__aarch64__)
u8_t bd_space[BD_SIZE_2MB] __attribute__ ((aligned (BD_SIZE_2MB)));
#elif defined (__arm__) || defined (ARMR5)
u8_t bd_space[BD_SIZE_1MB] __attribute__ ((aligned (BD_SIZE_1MB)));
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
volatile u32_t notifyinfo[XLWIP_CONFIG_N_TX_DESC];
#endif
#define XAxiDma_BD_TO_INDEX(ringptr, bdptr) \
(((UINTPTR)bdptr - (UINTPTR)(ringptr)->FirstBdAddr) / (ringptr)->Separation)
/* Turn on partial (TCP/UDP) checksum offload for this BD by setting
 * bit 0 of the USR0 application word. */
static inline void bd_csum_enable(XAxiDma_Bd *bd)
{
	u32 usr0 = XAxiDma_BdRead(bd, XAXIDMA_BD_USR0_OFFSET);
	usr0 |= 1;
	XAxiDma_BdWrite(bd, XAXIDMA_BD_USR0_OFFSET, usr0);
}
/* Turn off partial checksum offload for this BD by clearing bit 0 of
 * the USR0 application word. */
static inline void bd_csum_disable(XAxiDma_Bd *bd)
{
	u32 usr0 = XAxiDma_BdRead(bd, XAXIDMA_BD_USR0_OFFSET);
	usr0 &= ~1;
	XAxiDma_BdWrite(bd, XAXIDMA_BD_USR0_OFFSET, usr0);
}
/* Disable full checksum offload for this BD by clearing bits 1:0 of
 * the USR0 application word. */
static inline void bd_fullcsum_disable(XAxiDma_Bd *bd)
{
	u32 usr0 = XAxiDma_BdRead(bd, XAXIDMA_BD_USR0_OFFSET);
	usr0 &= ~3;
	XAxiDma_BdWrite(bd, XAXIDMA_BD_USR0_OFFSET, usr0);
}
/* Enable full checksum offload for this BD by setting bit 1 of the
 * USR0 application word. */
static inline void bd_fullcsum_enable(XAxiDma_Bd *bd)
{
	u32 usr0 = XAxiDma_BdRead(bd, XAXIDMA_BD_USR0_OFFSET);
	usr0 |= 2;
	XAxiDma_BdWrite(bd, XAXIDMA_BD_USR0_OFFSET, usr0);
}
/* Configure partial checksum offload on a TX BD.
 *
 * @param bd          buffer descriptor to program
 * @param tx_csbegin  byte offset where checksumming starts
 * @param tx_csinsert byte offset where the result is written back
 * @param tx_csinit   seed value for the checksum computation
 */
static inline void bd_csum_set(XAxiDma_Bd *bd, u16_t tx_csbegin, u16_t tx_csinsert,
		     u16_t tx_csinit)
{
	u32_t begin_and_insert;

	bd_csum_enable(bd);
	/* USR1 app word: start offset in the upper half, insert offset in
	 * the lower half. */
	begin_and_insert = ((u32_t)tx_csbegin << 16) | (u32_t) tx_csinsert;
	XAxiDma_BdWrite(bd, XAXIDMA_BD_USR1_OFFSET, begin_and_insert);
	/* USR2 app word carries the seed value. */
	XAxiDma_BdWrite(bd, XAXIDMA_BD_USR2_OFFSET, tx_csinit);
}
/* Received frame length lives in the low 14 bits of the USR4 app word. */
static inline u16_t extract_packet_len(XAxiDma_Bd *rxbd) {
	return (u16_t)(XAxiDma_BdRead(rxbd, XAXIDMA_BD_USR4_OFFSET) & 0x3fff);
}
/* Hardware-computed checksum lives in the low 16 bits of the USR3 app word. */
static inline u16_t extract_csum(XAxiDma_Bd *rxbd) {
	return (u16_t)(XAxiDma_BdRead(rxbd, XAXIDMA_BD_USR3_OFFSET) & 0xffff);
}
/* One's-complement addition step: add v into csum with end-around carry.
 * (The callers pass ~x to effectively subtract x.) */
static inline u32_t csum_sub(u32_t csum, u16_t v)
{
	u32_t sum = csum + (u32_t)v;
	if (sum < (u32_t)v) {
		/* wrap-around occurred: fold the carry back in */
		sum++;
	}
	return sum;
}
/*
* compare if the h/w computed checksum (stored in the rxbd)
* equals the TCP checksum value in the packet
*/
/*
 * Validate the hardware-computed partial checksum stored in the RX BD
 * against the packet contents. Only TCP-over-IPv4 frames are checked;
 * every other frame returns "valid" and is left to the stack.
 *
 * @param rxbd  receive BD carrying the h/w checksum and packet length
 * @param p     pbuf with the received frame
 * @return nonzero when the checksum is valid (or not checked), 0 when bad.
 */
s32_t is_checksum_valid(XAxiDma_Bd *rxbd, struct pbuf *p) {
	struct ethip_hdr *ehdr = p->payload;
	u8_t proto = IPH_PROTO(&ehdr->ip);
	/* check if it is a TCP packet */
	if (htons(ehdr->eth.type) == ETHTYPE_IP && proto == IP_PROTO_TCP) {
		u32_t iphdr_len;
		u16_t csum_in_rxbd, pseudo_csum, iphdr_csum, padding_csum;
		u16_t tcp_payload_offset;
		u32_t computed_csum;
		u16_t padding_len, tcp_payload_len, packet_len;
		u16_t csum;
		/* determine length of IP header */
		iphdr_len = (IPH_HL(&ehdr->ip) * 4);
		tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
		tcp_payload_len = htons(IPH_LEN(&ehdr->ip)) - IPH_HL(&ehdr->ip) * 4;
		packet_len = extract_packet_len(rxbd);
		/* anything past IP total length is link-layer padding */
		padding_len = packet_len - tcp_payload_offset - tcp_payload_len;
		csum_in_rxbd = extract_csum(rxbd);
		pseudo_csum = htons(inet_chksum_pseudo(NULL,
				proto, tcp_payload_len, (ip_addr_t *)&ehdr->ip.src,
				(ip_addr_t *)&ehdr->ip.dest));
		/* xps_ll_temac computes the checksum of the packet starting at byte 14
		 * we need to subtract the values of the ethernet & IP headers
		 */
		iphdr_csum  = inet_chksum(p->payload + 14, tcp_payload_offset - 14);
		/* compute csum of padding bytes, if any */
		padding_csum = inet_chksum(p->payload + p->tot_len - padding_len,
				padding_len);
		/* get the h/w checksum value */
		computed_csum = (u32_t)csum_in_rxbd;
		/* remove the effect of csumming the iphdr */
		computed_csum = csum_sub(computed_csum, ~iphdr_csum);
		/* add in the pseudo csum */
		computed_csum = csum_sub(computed_csum, ~pseudo_csum);
		/* remove any padding effect */
		computed_csum = csum_sub(computed_csum, ~padding_csum);
		/* normalize computed csum: fold carries into the low 16 bits */
		while (computed_csum >> 16) {
			computed_csum = (computed_csum & 0xffff) + (computed_csum >> 16);
		}
		/* convert to 16 bits and take 1's complement */
		csum = (u16_t)computed_csum;
		csum = ~csum;
		/* chksum is valid if: computed csum over the packet is 0 */
		return !csum;
	} else {
		/* just say yes to all other packets */
		/* the upper layers in the stack will compute and verify the checksum */
		return 1;
	}
}
static inline void *alloc_bdspace(int n_desc)
{
int space = XAxiDma_BdRingMemCalc(BD_ALIGNMENT, n_desc);
int padding = BD_ALIGNMENT*2;
void *unaligned_mem = mem_malloc(space + padding*4);
void *aligned_mem =
(void *)(((UINTPTR)(unaligned_mem + BD_ALIGNMENT)) & ~(BD_ALIGNMENT - 1));
#if DEBUG
assert(aligned_mem > unaligned_mem);
assert(aligned_mem + space < unaligned_mem + space + padding);
#endif
return aligned_mem;
}
/*
 * TX DMA interrupt handler: acknowledges pending interrupts, resets the
 * DMA engine on error, and recycles completed transmit BDs otherwise.
 *
 * @param arg  the struct xemac_s for this interface (registered at setup).
 */
static void axidma_send_handler(void *arg)
{
	unsigned irq_status;
	struct xemac_s *xemac;
	xaxiemacif_s *xaxiemacif;
	XAxiDma_BdRing *txringptr;
#if !NO_SYS
	/* tell the OS glue we are in interrupt context */
	xInsideISR++;
#endif
	xemac = (struct xemac_s *)(arg);
	xaxiemacif = (xaxiemacif_s *)(xemac->state);
	txringptr = XAxiDma_GetTxRing(&xaxiemacif->axidma);
	/* mask TX interrupts while we service the ring */
	XAxiDma_BdRingIntDisable(txringptr, XAXIDMA_IRQ_ALL_MASK);
	/* Read pending interrupts */
	irq_status = XAxiDma_BdRingGetIrq(txringptr);
	/* Acknowledge pending interrupts */
	XAxiDma_BdRingAckIrq(txringptr, irq_status);
	/* If error interrupt is asserted, raise error flag, reset the
	 * hardware to recover from the error, and return with no further
	 * processing.
	 */
	if (irq_status & XAXIDMA_IRQ_ERROR_MASK) {
		LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: axidma error interrupt is asserted\r\n",
			__FUNCTION__));
		XAxiDma_Reset(&xaxiemacif->axidma);
#if !NO_SYS
		xInsideISR--;
#endif
		return;
	}
	/* If Transmit done interrupt is asserted, process completed BD's */
	if (irq_status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)) {
		process_sent_bds(txringptr);
	}
	/* re-enable TX interrupts */
	XAxiDma_BdRingIntEnable(txringptr, XAXIDMA_IRQ_ALL_MASK);
#if !NO_SYS
	xInsideISR--;
#endif
}
/*
 * Refill the RX BD ring: for every free BD slot, allocate a fresh pbuf,
 * attach it to a BD, flush caches, and hand the BD back to hardware.
 * Stops early (without error return) if pbufs or BDs run out.
 *
 * @param rxring  receive BD ring to replenish.
 */
static void setup_rx_bds(XAxiDma_BdRing *rxring)
{
	XAxiDma_Bd *rxbd;
	s32_t n_bds;
	XStatus status;
	struct pbuf *p;
	u32 bdsts;
	n_bds = XAxiDma_BdRingGetFreeCnt(rxring);
	while (n_bds > 0) {
		n_bds--;
		/* buffer must hold a maximum-sized frame */
#ifdef USE_JUMBO_FRAMES
		p = pbuf_alloc(PBUF_RAW, XAE_MAX_JUMBO_FRAME_SIZE, PBUF_POOL);
#else
		p = pbuf_alloc(PBUF_RAW, XAE_MAX_FRAME_SIZE, PBUF_POOL);
#endif
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			xil_printf("unable to alloc pbuf in recv_handler\r\n");
			return;
		}
		status = XAxiDma_BdRingAlloc(rxring, 1, &rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
			pbuf_free(p);
			return;
		}
		/* Setup the BD. */
		XAxiDma_BdSetBufAddr(rxbd, (UINTPTR)p->payload);
		/* Clear everything but the COMPLETE bit, which is cleared when
		 * committed to hardware.
		 */
		bdsts = XAxiDma_BdGetSts(rxbd);
		bdsts &=  XAXIDMA_BD_STS_COMPLETE_MASK;
		XAxiDma_BdWrite(rxbd, XAXIDMA_BD_STS_OFFSET, bdsts);
		XAxiDma_BdSetLength(rxbd, p->len, rxring->MaxTransferLen);
		XAxiDma_BdSetCtrl(rxbd, 0);
		/* remember the pbuf so the RX handler can retrieve it */
		XAxiDma_BdSetId(rxbd, p);
#if !defined (__MICROBLAZE__) && !defined (__riscv)
		/* make sure BD writes are visible before cache maintenance */
		dsb();
#endif
#ifdef USE_JUMBO_FRAMES
		XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_JUMBO_FRAME_SIZE);
#else
		XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_FRAME_SIZE);
#endif
#if !defined(__aarch64__)
		XCACHE_FLUSH_DCACHE_RANGE(rxbd, sizeof *rxbd);
#endif
		/* Enqueue to HW */
		status = XAxiDma_BdRingToHw(rxring, 1, rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
			if (status == XST_DMA_SG_LIST_ERROR) {
				LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XAxiDma_BdRingAlloc()\r\n"));
			}
			else {
				LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));
			}
			pbuf_free(p);
			XAxiDma_BdRingUnAlloc(rxring, 1, rxbd);
			return;
		}
	}
}
/*
 * RX DMA interrupt handler: on error, resets and resumes the DMA engine;
 * on completion, harvests finished BDs, pushes their pbufs onto the
 * adapter's receive queue, and refills the ring via setup_rx_bds().
 *
 * @param arg  the struct xemac_s for this interface (registered at setup).
 */
static void axidma_recv_handler(void *arg)
{
	struct pbuf *p;
	u32 irq_status, i, timeOut;
	XAxiDma_Bd *rxbd, *rxbdset;
	struct xemac_s *xemac;
	xaxiemacif_s *xaxiemacif;
	XAxiDma_BdRing *rxring;
#if !NO_SYS
	xInsideISR++;
#endif
	xemac = (struct xemac_s *)(arg);
	xaxiemacif = (xaxiemacif_s *)(xemac->state);
	rxring = XAxiDma_GetRxRing(&xaxiemacif->axidma);
	/* mask RX interrupts while we service the ring */
	XAxiDma_BdRingIntDisable(rxring, XAXIDMA_IRQ_ALL_MASK);
	/* Read pending interrupts */
	irq_status = XAxiDma_BdRingGetIrq(rxring);
	/* Acknowledge pending interrupts */
	XAxiDma_BdRingAckIrq(rxring, irq_status);
	/* If error interrupt is asserted, raise error flag, reset the
	 * hardware to recover from the error, and return with no further
	 * processing.
	 */
	if ((irq_status & XAXIDMA_IRQ_ERROR_MASK)) {
		setup_rx_bds(rxring);
		LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: axidma error interrupt is asserted\r\n",
			__FUNCTION__));
		XAxiDma_Reset(&xaxiemacif->axidma);
		/* bounded busy-wait for the reset to finish */
		timeOut = 10000;
		while (timeOut) {
			if (XAxiDma_ResetIsDone(&xaxiemacif->axidma)) {
				break;
			}
			timeOut -= 1;
		}
		XAxiDma_BdRingIntEnable(rxring, XAXIDMA_IRQ_ALL_MASK);
		XAxiDma_Resume(&xaxiemacif->axidma);
#if !NO_SYS
		xInsideISR--;
#endif
		return;
	}
	/* If Reception done interrupt is asserted, call RX call back function
	 * to handle the processed BDs and then raise the according flag.
	 */
	if (irq_status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)) {
		u32 bd_processed;
		u32 rx_bytes;
		bd_processed = XAxiDma_BdRingFromHw(rxring, XAXIDMA_ALL_BDS, &rxbdset);
		for (i = 0, rxbd = rxbdset; i < bd_processed; i++) {
			/* the pbuf was stashed in the BD id when the ring was filled */
			p = (struct pbuf *)(UINTPTR)XAxiDma_BdGetId(rxbd);
			/* Adjust the buffer size to the actual number of bytes received.*/
			rx_bytes = extract_packet_len(rxbd);
			pbuf_realloc(p, rx_bytes);
#if defined(__aarch64__)
#ifdef USE_JUMBO_FRAMES
			XCACHE_INVALIDATE_DCACHE_RANGE(p->payload,
								XAE_MAX_JUMBO_FRAME_SIZE);
#else
			XCACHE_INVALIDATE_DCACHE_RANGE(p->payload, XAE_MAX_FRAME_SIZE);
#endif
#endif
#if LWIP_PARTIAL_CSUM_OFFLOAD_RX==1
			/* Verify for partial checksum offload case */
			if (!is_checksum_valid(rxbd, p)) {
				LWIP_DEBUGF(NETIF_DEBUG, ("Incorrect csum as calculated by the hw\r\n"));
			}
#endif
			/* store it in the receive queue,
			 * where it'll be processed by a different handler
			 */
			if (pq_enqueue(xaxiemacif->recv_q, (void*)p) < 0) {
#if LINK_STATS
				lwip_stats.link.memerr++;
				lwip_stats.link.drop++;
#endif
				pbuf_free(p);
			}
			rxbd = (XAxiDma_Bd *)XAxiDma_BdRingNext(rxring, rxbd);
		}
		/* free up the BD's */
		XAxiDma_BdRingFree(rxring, bd_processed, rxbdset);
		/* return all the processed bd's back to the stack */
		/* setup_rx_bds -> use XAxiDma_BdRingGetFreeCnt */
		setup_rx_bds(rxring);
	}
	XAxiDma_BdRingIntEnable(rxring, XAXIDMA_IRQ_ALL_MASK);
#if !NO_SYS
	/* wake the input thread waiting on received data */
	sys_sem_signal(&xemac->sem_rx_data_available);
	xInsideISR--;
#endif
}
/* Report how many transmit BDs are currently free; a nonzero count means
 * at least one more frame can be queued for transmission. */
s32_t xaxiemac_is_tx_space_available(xaxiemacif_s *emac)
{
	return XAxiDma_BdRingGetFreeCnt(XAxiDma_GetTxRing(&emac->axidma));
}
/*
 * Reclaim transmit BDs that hardware has finished with: release each
 * associated pbuf (the extra reference taken in axidma_sgsend), clear the
 * per-BD blocking flag when blocking-TX is compiled in, and return the
 * BDs to the free pool.
 *
 * Fix over the original: the BD index was computed on every iteration but
 * only consumed under LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE, leaving an
 * unused variable (and wasted work) when that option is off; the
 * computation now lives inside the conditional block.
 *
 * @param txring  transmit BD ring to reclaim from.
 * @return result of XAxiDma_BdRingFree, or XST_FAILURE if nothing was done.
 */
s32_t process_sent_bds(XAxiDma_BdRing *txring)
{
	XAxiDma_Bd *txbdset, *txbd;
	int n_bds, i;

	/* obtain a list of processed BD's */
	n_bds = XAxiDma_BdRingFromHw(txring, XAXIDMA_ALL_BDS, &txbdset);
	if (n_bds == 0) {
		return XST_FAILURE;
	}
	/* free the pbuf associated with each BD */
	for (i = 0, txbd = txbdset; i < n_bds; i++) {
		struct pbuf *p = (struct pbuf *)(UINTPTR)XAxiDma_BdGetId(txbd);
		pbuf_free(p);
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
		/* unblock any sender waiting on this descriptor */
		notifyinfo[XAxiDma_BD_TO_INDEX(txring, txbd)] = 0;
#endif
		txbd = (XAxiDma_Bd *)XAxiDma_BdRingNext(txring, txbd);
	}
	/* free the processed BD's */
	return (XAxiDma_BdRingFree(txring, n_bds, txbdset));
}
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
/*
 * Queue a pbuf chain for transmission via scatter-gather DMA: one BD per
 * pbuf, with SOP/EOP flagged on the first/last BD and optional checksum
 * offload configured on the first BD.
 *
 * @param xaxiemacif             adapter state (supplies the TX ring)
 * @param p                      pbuf chain to transmit; each pbuf gets an
 *                               extra reference, released in process_sent_bds
 * @param block_till_tx_complete when 1, mark the last-used BD so the caller
 *                               can block until it completes
 * @param to_block_index         out: index of the BD to block on
 * @return XST_SUCCESS on enqueue, otherwise a failure code.
 *         NOTE(review): the BD-allocation failure path returns ERR_IF (an
 *         lwIP err_t) from an XStatus function - callers appear to treat
 *         any nonzero as failure; confirm before changing.
 */
XStatus axidma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p,
		u32_t block_till_tx_complete, u32_t *to_block_index)
#else
XStatus axidma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p)
#endif
{
	struct pbuf *q;
	s32_t n_pbufs;
	XAxiDma_Bd *txbdset, *txbd, *last_txbd = NULL;
	XStatus status;
	XAxiDma_BdRing *txring;
	u32_t max_frame_size;
	u32_t bdindex = 0;
	/* 18 = Ethernet header (14) + FCS (4) overhead */
#ifdef USE_JUMBO_FRAMES
	max_frame_size = XAE_MAX_JUMBO_FRAME_SIZE - 18;
#else
	max_frame_size = XAE_MAX_FRAME_SIZE - 18;
#endif
	txring = XAxiDma_GetTxRing(&xaxiemacif->axidma);
	/* first count the number of pbufs */
	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
		n_pbufs++;
	/* obtain as many BD's */
	status = XAxiDma_BdRingAlloc(txring, n_pbufs, &txbdset);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
		return ERR_IF;
	}
	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		bdindex = XAxiDma_BD_TO_INDEX(txring, txbd);
		/* Send the data from the pbuf to the interface, one pbuf at a
		 * time. The size of the data in each pbuf is kept in the ->len
		 * variable.
		 */
		XAxiDma_BdSetBufAddr(txbd, (UINTPTR)q->payload);
		if (q->len > max_frame_size) {
			/* clamp oversized pbufs to the frame limit */
			XAxiDma_BdSetLength(txbd, max_frame_size,
								txring->MaxTransferLen);
		}
		else {
			XAxiDma_BdSetLength(txbd, q->len, txring->MaxTransferLen);
		}
		XAxiDma_BdSetId(txbd, (void *)q);
		XAxiDma_BdSetCtrl(txbd, 0);
		/* make the payload visible to the DMA engine */
		XCACHE_FLUSH_DCACHE_RANGE(q->payload, q->len);
		/* keep the pbuf alive until process_sent_bds releases it */
		pbuf_ref(q);
		last_txbd = txbd;
		txbd = (XAxiDma_Bd *)XAxiDma_BdRingNext(txring, txbd);
	}
	if (n_pbufs == 1) {
		XAxiDma_BdSetCtrl(txbdset, XAXIDMA_BD_CTRL_TXSOF_MASK
				| XAXIDMA_BD_CTRL_TXEOF_MASK);
	} else {
		/* in the first packet, set the SOP */
		XAxiDma_BdSetCtrl(txbdset, XAXIDMA_BD_CTRL_TXSOF_MASK);
		/* in the last packet, set the EOP */
		XAxiDma_BdSetCtrl(last_txbd, XAXIDMA_BD_CTRL_TXEOF_MASK);
	}
#if LWIP_FULL_CSUM_OFFLOAD_TX==1
	/* full checksum offload only for frames with a payload past the headers */
	bd_fullcsum_disable(txbdset);
	if (p->len > sizeof(struct ethip_hdr)) {
		bd_fullcsum_enable(txbdset);
	}
#endif
#if LWIP_PARTIAL_CSUM_OFFLOAD_TX==1
	bd_csum_disable(txbdset);
	if (p->len > sizeof(struct ethip_hdr)) {
		struct ethip_hdr *ehdr = p->payload;
		u8_t proto = IPH_PROTO(&ehdr->ip);
		/* check if it is a TCP packet */
		if (htons(ehdr->eth.type) == ETHTYPE_IP && proto ==
			IP_PROTO_TCP) {
			u32_t iphdr_len, csum_insert_offset;
			u16_t tcp_len;	/* TCP header length + data length in bytes */
			u16_t csum_init = 0;
			u16_t tcp_payload_offset;
			/* determine length of IP header */
			iphdr_len = (IPH_HL(&ehdr->ip) * 4);
			tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
			tcp_len = p->tot_len - tcp_payload_offset;
			/* insert checksum at offset 16 for TCP, 6 for UDP */
			if (proto == IP_PROTO_TCP)
				csum_insert_offset = tcp_payload_offset + 16;
			else if (proto == IP_PROTO_UDP)
				csum_insert_offset = tcp_payload_offset + 6;
			else
				csum_insert_offset = 0;
			/* compute pseudo header checksum value.
			 * NOTE(review): the argument order here differs from the
			 * inet_chksum_pseudo() call in is_checksum_valid(); one of
			 * the two presumably targets a different lwIP version -
			 * TODO confirm against the lwIP headers in use. */
			csum_init = inet_chksum_pseudo(NULL,
					(ip_addr_t *)&ehdr->ip.src,
					(ip_addr_t *)&ehdr->ip.dest, proto, tcp_len);
			/* init buffer descriptor */
			bd_csum_set(txbdset, tcp_payload_offset,
					csum_insert_offset, htons(~csum_init));
		}
	}
#endif
#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
	if (block_till_tx_complete == 1) {
		/* flag the last BD used so the caller can wait on it */
		notifyinfo[bdindex] = 1;
		*to_block_index = bdindex;
	}
#endif
	/* enq to h/w */
	return XAxiDma_BdRingToHw(txring, n_pbufs, txbdset);
}
/*
 * One-time initialization of the AXI DMA engine backing this interface:
 * carves out BD memory, creates and populates the RX/TX BD rings, sets
 * interrupt coalescing, starts both rings, and wires up the TEMAC error
 * and DMA TX/RX interrupt handlers for whichever interrupt controller the
 * build targets (ScuGic, XIntc normal/fast, or SDT interrupt wrap).
 *
 * @param xemac  adapter wrapper; xemac->state is the xaxiemacif_s.
 * @return 0 on success, ERR_IF on any setup failure.
 */
XStatus init_axi_dma(struct xemac_s *xemac)
{
	XAxiDma_Config *dmaconfig;
	XAxiDma_Bd bdtemplate;
	XAxiDma_BdRing *rxringptr, *txringptr;
	XAxiDma_Bd *rxbd;
	struct pbuf *p;
	XStatus status;
	u32_t i;
	u32_t bd_space_index = 0;
	UINTPTR baseaddr;
	xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
#if XPAR_INTC_0_HAS_FAST == 1
	/* publish instance pointers for the argument-less fast handlers */
	xaxiemacif_fast = xaxiemacif;
	xemac_fast = xemac;
#endif
#if NO_SYS
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#endif
#if !NO_SYS
	/* NOTE(review): identical to the NO_SYS branch above; the two could
	 * be collapsed into one unconditional line. */
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#endif
	/* FIXME: On ZyqnMP Multiple Axi Ethernet are not supported */
	/* Make the static BD region uncached so CPU and DMA agree on contents. */
#if defined (__aarch64__)
	Xil_SetTlbAttributes((u64)bd_space, NORM_NONCACHE | INNER_SHAREABLE);
#elif defined (ARMR5)
	Xil_SetTlbAttributes((s32_t)bd_space, STRONG_ORDERD_SHARED | PRIV_RW_USER_RW);
#elif defined (__arm__)
	Xil_SetTlbAttributes((s32_t)bd_space, DEVICE_MEMORY);
#endif
#if defined (__MICROBLAZE__) || defined (__riscv)
	/* no MMU attribute control: allocate BD space from the lwIP heap */
	xaxiemacif->rx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_RX_DESC);
	xaxiemacif->tx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_TX_DESC);
#else
	/* carve RX and TX BD areas out of the static uncached bd_space */
	xaxiemacif->rx_bdspace = (void *)(UINTPTR)&(bd_space[bd_space_index]);
	bd_space_index += BD_SIZE_64KB;
	xaxiemacif->tx_bdspace = (void *)(UINTPTR)&(bd_space[bd_space_index]);
#endif
	LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n",
							xaxiemacif->rx_bdspace));
	LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n",
							xaxiemacif->tx_bdspace));
	if (!xaxiemacif->rx_bdspace || !xaxiemacif->tx_bdspace) {
		xil_printf("%s@%d: Error: Unable to allocate memory for RX buffer descriptors",
				__FILE__, __LINE__);
		return ERR_IF;
	}
	/* initialize DMA */
#ifndef SDT
	baseaddr = xaxiemacif->axi_ethernet.Config.AxiDevBaseAddress;
	dmaconfig = XAxiDma_LookupConfigBaseAddr(baseaddr);
#else
	baseaddr = xaxiemacif->axi_ethernet.AxiDevBaseAddress;
	dmaconfig = XAxiDma_LookupConfig(baseaddr);
#endif
	XAxiDma_CfgInitialize(&xaxiemacif->axidma, dmaconfig);
	rxringptr = XAxiDma_GetRxRing(&xaxiemacif->axidma);
	txringptr = XAxiDma_GetTxRing(&xaxiemacif->axidma);
	LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
	LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));
	/* Setup RxBD space.
	 * Setup a BD template for the Rx channel. This template will be copied to
	 * every RxBD. We will not have to explicitly set these again.
	 */
	XAxiDma_BdClear(&bdtemplate);
	/* Create the RxBD ring */
	status = XAxiDma_BdRingCreate(rxringptr, (UINTPTR) xaxiemacif->rx_bdspace,
				(UINTPTR) xaxiemacif->rx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_RX_DESC);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
		return ERR_IF;
	}
	XAxiDma_BdClear(&bdtemplate);
	status = XAxiDma_BdRingClone(rxringptr, &bdtemplate);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
		return ERR_IF;
	}
	/* Create the TxBD ring */
	status = XAxiDma_BdRingCreate(txringptr, (UINTPTR) xaxiemacif->tx_bdspace,
				(UINTPTR) xaxiemacif->tx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_TX_DESC);
	if (status != XST_SUCCESS) {
		return ERR_IF;
	}
	/* We reuse the bd template, as the same one will work for both rx and tx. */
	status = XAxiDma_BdRingClone(txringptr, &bdtemplate);
	if (status != XST_SUCCESS) {
		return ERR_IF;
	}
	/* Allocate RX descriptors, 1 RxBD at a time.*/
	for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
		status = XAxiDma_BdRingAlloc(rxringptr, 1, &rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("init_axi_dma: Error allocating RxBD\r\n"));
			return ERR_IF;
		}
#ifdef USE_JUMBO_FRAMES
		p = pbuf_alloc(PBUF_RAW, XAE_MAX_JUMBO_FRAME_SIZE, PBUF_POOL);
#else
		p = pbuf_alloc(PBUF_RAW, XAE_MAX_FRAME_SIZE, PBUF_POOL);
#endif
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in recv_handler\r\n"));
			return ERR_IF;
		}
		/* Setup the BD. The BD template used in the call to
		 * XAxiEthernet_SgSetSpace() set the "last" field of all RxBDs.
		 * Therefore we are not required to issue a XAxiDma_Bd_SetLast(rxbd)
		 * here.
		 */
		XAxiDma_BdSetBufAddr(rxbd, (UINTPTR)p->payload);
		XAxiDma_BdSetLength(rxbd, p->len, rxringptr->MaxTransferLen);
		XAxiDma_BdSetCtrl(rxbd, 0);
		XAxiDma_BdSetId(rxbd, p);
#ifdef USE_JUMBO_FRAMES
		XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_JUMBO_FRAME_SIZE);
#else
		XCACHE_FLUSH_DCACHE_RANGE((UINTPTR)p->payload, (UINTPTR)XAE_MAX_FRAME_SIZE);
#endif
#if !defined(__aarch64__)
		XCACHE_FLUSH_DCACHE_RANGE(rxbd, sizeof *rxbd);
#endif
		/* Enqueue to HW */
		status = XAxiDma_BdRingToHw(rxringptr, 1, rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
			return ERR_IF;
		}
	}
	/* interrupt after N packets or on the first timer tick, whichever first */
	status = XAxiDma_BdRingSetCoalesce(txringptr, XLWIP_CONFIG_N_TX_COALESCE,
					   0x1);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
		return ERR_IF;
	}
	status = XAxiDma_BdRingSetCoalesce(rxringptr, XLWIP_CONFIG_N_RX_COALESCE,
					   0x1);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
		return ERR_IF;
	}
	/* start DMA */
	status = XAxiDma_BdRingStart(txringptr);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error: failed to start TX BD ring\r\n"));
		return ERR_IF;
	}
	status = XAxiDma_BdRingStart(rxringptr);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error: failed to start RX BD ring\r\n"));
		return ERR_IF;
	}
	/* enable DMA interrupts */
	XAxiDma_BdRingIntEnable(txringptr, XAXIDMA_IRQ_ALL_MASK);
	XAxiDma_BdRingIntEnable(rxringptr, XAXIDMA_IRQ_ALL_MASK);
	/* --- interrupt controller wiring; variant chosen at compile time --- */
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
	/* Zynq: register with the SCU GIC, set priorities/trigger, enable. */
	XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(Xil_ExceptionHandler)xaxiemac_error_handler,
			&xaxiemacif->axi_ethernet);
	XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
			(Xil_ExceptionHandler)axidma_send_handler,
			xemac);
	XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
			(Xil_ExceptionHandler)axidma_recv_handler,
			xemac);
	XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			AXIETH_INTR_PRIORITY_SET_IN_GIC,
			TRIG_TYPE_RISING_EDGE_SENSITIVE);
	XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
			AXIDMA_TX_INTR_PRIORITY_SET_IN_GIC,
			TRIG_TYPE_RISING_EDGE_SENSITIVE);
	XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
			AXIDMA_RX_INTR_PRIORITY_SET_IN_GIC,
			TRIG_TYPE_RISING_EDGE_SENSITIVE);
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.TemacIntr);
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr);
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr);
#else
#if NO_SYS
#if XPAR_INTC_0_HAS_FAST == 1
	/* Register axiethernet interrupt with interrupt controller as Fast
							Interrupts */
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XFastInterruptHandler)xaxiemac_errorfast_handler);
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
			(XFastInterruptHandler)axidma_sendfast_handler);
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
			(XFastInterruptHandler)axidma_recvfast_handler);
#else
	/* Register axiethernet interrupt with interrupt controller */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XInterruptHandler)xaxiemac_error_handler,
			&xaxiemacif->axi_ethernet);
	/* connect & enable DMA interrupts */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
			(XInterruptHandler)axidma_send_handler,
			xemac);
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
			(XInterruptHandler)axidma_recv_handler,
			xemac);
#endif
	/* Enable EMAC interrupts in the interrupt controller */
	do {
		/* read current interrupt enable mask */
		unsigned int cur_mask = XIntc_In32(xtopologyp->intc_baseaddr +
				XIN_IER_OFFSET);
		/* form new mask enabling AXIDMA & axiethernet interrupts */
		cur_mask = cur_mask
				| (1 << xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr)
				| (1 << xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr)
				| (1 << xaxiemacif->axi_ethernet.Config.TemacIntr);
		/* set new mask */
		XIntc_EnableIntr(xtopologyp->intc_baseaddr, cur_mask);
	} while (0);
#else
	/* NOTE(review): this !NO_SYS branch duplicates the NO_SYS branch
	 * above line-for-line; presumably kept for historical reasons. */
#if XPAR_INTC_0_HAS_FAST == 1
	/* Register axiethernet interrupt with interrupt controller as Fast
							Interrupts */
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XFastInterruptHandler)xaxiemac_errorfast_handler);
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
			(XFastInterruptHandler)axidma_sendfast_handler);
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
			(XFastInterruptHandler)axidma_recvfast_handler);
#else
	/* Register axiethernet interrupt with interrupt controller */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XInterruptHandler)xaxiemac_error_handler,
			&xaxiemacif->axi_ethernet);
	/* connect & enable DMA interrupts */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr,
			(XInterruptHandler)axidma_send_handler,
			xemac);
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr,
			(XInterruptHandler)axidma_recv_handler,
			xemac);
#endif
	/* Enable EMAC interrupts in the interrupt controller */
	do {
		/* read current interrupt enable mask */
		unsigned int cur_mask = XIntc_In32(xtopologyp->intc_baseaddr +
				XIN_IER_OFFSET);
		/* form new mask enabling AXIDMA & axiethernet interrupts */
		cur_mask = cur_mask
				| (1 << xaxiemacif->axi_ethernet.Config.AxiDmaTxIntr)
				| (1 << xaxiemacif->axi_ethernet.Config.AxiDmaRxIntr)
				| (1 << xaxiemacif->axi_ethernet.Config.TemacIntr);
		/* set new mask */
		XIntc_EnableIntr(xtopologyp->intc_baseaddr, cur_mask);
	} while (0);
#endif
#endif
#else
	/* SDT flow: generic interrupt-wrap registration */
	XSetupInterruptSystem(&xaxiemacif->axi_ethernet, &xaxiemac_error_handler,
			      xaxiemacif->axi_ethernet.Config.IntrId,
			      xaxiemacif->axi_ethernet.Config.IntrParent,
			      XINTERRUPT_DEFAULT_PRIORITY);
	XSetupInterruptSystem(xemac, &axidma_send_handler,
			      dmaconfig->IntrId[0],
			      dmaconfig->IntrParent,
			      XINTERRUPT_DEFAULT_PRIORITY);
	XSetupInterruptSystem(xemac, &axidma_recv_handler,
			      dmaconfig->IntrId[1],
			      dmaconfig->IntrParent,
			      XINTERRUPT_DEFAULT_PRIORITY);
#endif
	return 0;
}
#ifndef SDT
#if XPAR_INTC_0_HAS_FAST == 1
/* Fast-interrupt trampolines: fast ISRs take no argument, so the adapter
 * instance is fetched from the xemac_fast / xaxiemacif_fast globals that
 * were published during initialization. */
/****************************** Fast receive Handler *************************/
static void axidma_recvfast_handler(void)
{
	axidma_recv_handler((void *)xemac_fast);
}
/****************************** Fast Send Handler ****************************/
static void axidma_sendfast_handler(void)
{
	axidma_send_handler((void *)xemac_fast);
}
/****************************** Fast Error Handler ***************************/
static void xaxiemac_errorfast_handler(void)
{
	xaxiemac_error_handler(&xaxiemacif_fast->axi_ethernet);
}
#endif
#endif

View File

@ -1,366 +0,0 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#include "lwip/sys.h"
#endif
#include "lwip/stats.h"
#include "netif/xadapter.h"
#include "netif/xaxiemacif.h"
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#else
#include "xintc_l.h"
#endif
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#define AXIFIFO_INTR_PRIORITY_SET_IN_GIC 0xA0
#define AXIETH_INTR_PRIORITY_SET_IN_GIC 0xA0
#define TRIG_TYPE_RISING_EDGE_SENSITIVE 0x3
#define INTC_DIST_BASE_ADDR XPAR_SCUGIC_DIST_BASEADDR
#endif
#include "xstatus.h"
#include "xaxiemacif_fifo.h"
#include "xlwipconfig.h"
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Function Prototypes *********************************************/
/*
* Function prototypes of the functions used for registering Fast
* Interrupt Handlers
*/
static void xllfifo_fastintr_handler(void) __attribute__ ((fast_interrupt));
static void xaxiemac_fasterror_handler(void) __attribute__ ((fast_interrupt));
/**************** Variable Declarations **************************************/
/** Variables for Fast Interrupt handlers ***/
struct xemac_s *xemac_fast;
xaxiemacif_s *xaxiemacif_fast;
#endif
#if !NO_SYS
extern u32 xInsideISR;
#endif
/* Report whether the TX FIFO can accept at least one maximum-sized frame.
 * Returns non-zero when enough space is free, zero otherwise. */
int xaxiemac_is_tx_space_available(xaxiemacif_s *emac)
{
	/* TxVacancy is reported in 32-bit words; convert to bytes before
	 * comparing against the largest frame we may have to queue. */
	u32_t vacancy_bytes = XLlFifo_TxVacancy(&emac->axififo) * 4;
	return (vacancy_bytes > XAE_MAX_FRAME_SIZE);
}
/* Drain every frame currently queued in the AXI-Stream FIFO.
 *
 * For each frame: read its length, allocate a pbuf from the pool, copy the
 * frame out of the FIFO, and enqueue the pbuf on the adapter's receive queue
 * (consumed later by the xemacif input thread). When no pbuf is available or
 * the queue is full, the frame is read and dropped so the FIFO's data and
 * length registers stay in sync.
 *
 * @param xemac adapter instance; xemac->state holds the xaxiemacif_s.
 */
static void
xllfifo_recv_handler(struct xemac_s *xemac)
{
	u32_t frame_length;
	struct pbuf *p;
	xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
	XLlFifo *llfifo = &xaxiemacif->axififo;
	/* While there is data in the fifo ... */
	while (XLlFifo_RxOccupancy(llfifo)) {
		/* find packet length */
		frame_length = XLlFifo_RxGetLen(llfifo);
		/* allocate a pbuf */
		p = pbuf_alloc(PBUF_RAW, frame_length, PBUF_POOL);
		if (!p) {
			char tmp_frame[XAE_MAX_FRAME_SIZE];
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			/* receive and drop packet to keep data & len registers in sync */
			XLlFifo_Read(llfifo, tmp_frame, frame_length);
			continue;
		}
		/* receive packet */
		XLlFifo_Read(llfifo, p->payload, frame_length);
#if ETH_PAD_SIZE
		/* Fix: the original referenced an undeclared identifier 'len'
		 * here, which fails to compile whenever ETH_PAD_SIZE != 0.
		 * NOTE(review): adjusting the length only after the frame has
		 * been read looks incomplete for pad support — the pbuf would
		 * need ETH_PAD_SIZE extra bytes at allocation time. Confirm
		 * before building with a non-zero ETH_PAD_SIZE. */
		frame_length += ETH_PAD_SIZE; /* allow room for Ethernet padding */
#endif
		/* store it in the receive queue, where it'll be processed by xemacif input thread */
		if (pq_enqueue(xaxiemacif->recv_q, (void*)p) < 0) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			pbuf_free(p);
			continue;
		}
#if !NO_SYS
		sys_sem_signal(&xemac->sem_rx_data_available);
#endif
#if LINK_STATS
		lwip_stats.link.recv++;
#endif
	}
}
/* Log each asserted FIFO error condition and reset whichever side (RX/TX)
 * of the FIFO reported an error.
 *
 * @param xaxiemacif  adapter state holding the XLlFifo instance.
 * @param pending_intr bitmask of pending interrupt causes (XLLF_INT_*).
 */
static void
fifo_error_handler(xaxiemacif_s *xaxiemacif, u32_t pending_intr)
{
	XLlFifo *fifo = &xaxiemacif->axififo;

	/* Report each individual error cause that is asserted. */
	if (pending_intr & XLLF_INT_RPURE_MASK) {
		LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Rx under-read error"));
	}
	if (pending_intr & XLLF_INT_RPORE_MASK) {
		LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Rx over-read error"));
	}
	if (pending_intr & XLLF_INT_RPUE_MASK) {
		LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Rx fifo empty"));
	}
	if (pending_intr & XLLF_INT_TPOE_MASK) {
		LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Tx fifo overrun"));
	}
	if (pending_intr & XLLF_INT_TSE_MASK) {
		LWIP_DEBUGF(NETIF_DEBUG, ("llfifo: Tx length mismatch"));
	}

	/* Recover: reset the RX side if any RX error bit is set ... */
	if (pending_intr & XLLF_INT_RXERROR_MASK) {
		XLlFifo_IntClear(fifo, XLLF_INT_RRC_MASK);
		XLlFifo_RxReset(fifo);
	}
	/* ... and the TX side if any TX error bit is set. */
	if (pending_intr & XLLF_INT_TXERROR_MASK) {
		XLlFifo_IntClear(fifo, XLLF_INT_TRC_MASK);
		XLlFifo_TxReset(fifo);
	}
}
static void
xllfifo_intr_handler(struct xemac_s *xemac)
{
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
XLlFifo *llfifo = &xaxiemacif->axififo;
u32_t pending_fifo_intr = XLlFifo_IntPending(llfifo);
#if !NO_SYS
xInsideISR++;
#endif
while (pending_fifo_intr) {
if (pending_fifo_intr & XLLF_INT_RC_MASK) {
/* receive interrupt */
XLlFifo_IntClear(llfifo, XLLF_INT_RC_MASK);
xllfifo_recv_handler(xemac);
} else if (pending_fifo_intr & XLLF_INT_TC_MASK) {
/* tx intr */
XLlFifo_IntClear(llfifo, XLLF_INT_TC_MASK);
} else {
XLlFifo_IntClear(llfifo, XLLF_INT_ALL_MASK &
~(XLLF_INT_RC_MASK |
XLLF_INT_TC_MASK));
fifo_error_handler(xaxiemacif, pending_fifo_intr);
}
pending_fifo_intr = XLlFifo_IntPending(llfifo);
}
#if !NO_SYS
xInsideISR--;
#endif
}
/* Initialize the AXI-Stream FIFO data path of an AXI Ethernet instance:
 * set up the FIFO, clear and enable its interrupts, then hook the FIFO and
 * MAC-error interrupt handlers into whichever interrupt controller the
 * platform uses (SCUGIC on Zynq, AXI INTC otherwise). Returns 0 on success. */
XStatus init_axi_fifo(struct xemac_s *xemac)
{
	xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
#if XPAR_INTC_0_HAS_FAST == 1
	/* Fast ISRs take no argument — publish the instance pointers through
	 * globals for the fast-handler trampolines. */
	xaxiemacif_fast = xaxiemacif;
	xemac_fast = xemac;
#endif
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
	/* initialize ll fifo */
	XLlFifo_Initialize(&xaxiemacif->axififo,
			XAxiEthernet_AxiDevBaseAddress(&xaxiemacif->axi_ethernet));
	/* Clear any pending FIFO interrupts */
	XLlFifo_IntClear(&xaxiemacif->axififo, XLLF_INT_ALL_MASK);
	/* enable fifo interrupts */
	XLlFifo_IntEnable(&xaxiemacif->axififo, XLLF_INT_ALL_MASK);
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
	/* Zynq: route MAC-error and FIFO interrupts through the SCUGIC,
	 * set their priority/trigger type, then enable them. */
	XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XInterruptHandler)xaxiemac_error_handler,
			&xaxiemacif->axi_ethernet);
	XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
			(XInterruptHandler)xllfifo_intr_handler,
			xemac);
	XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			AXIETH_INTR_PRIORITY_SET_IN_GIC,
			TRIG_TYPE_RISING_EDGE_SENSITIVE);
	XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
			AXIFIFO_INTR_PRIORITY_SET_IN_GIC,
			TRIG_TYPE_RISING_EDGE_SENSITIVE);
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.TemacIntr);
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
			xaxiemacif->axi_ethernet.Config.AxiFifoIntr);
#else
/* NOTE(review): the NO_SYS and !NO_SYS branches below are textually
 * identical; they could be merged. Kept as-is here (doc-only change). */
#if NO_SYS
#if XPAR_INTC_0_HAS_FAST == 1
	/* Register temac interrupt with interrupt controller */
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XFastInterruptHandler)xaxiemac_fasterror_handler);
	/* connect & enable FIFO interrupt */
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
			(XFastInterruptHandler)xllfifo_fastintr_handler);
#else
	/* Register temac interrupt with interrupt controller */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XInterruptHandler)xaxiemac_error_handler,
			&xaxiemacif->axi_ethernet);
	/* connect & enable FIFO interrupt */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
			(XInterruptHandler)xllfifo_intr_handler,
			xemac);
#endif
	/* Enable EMAC interrupts in the interrupt controller */
	do {
		/* read current interrupt enable mask */
		unsigned int cur_mask = XIntc_In32(xtopologyp->intc_baseaddr + XIN_IER_OFFSET);
		/* form new mask enabling SDMA & ll_temac interrupts */
		cur_mask = cur_mask
			| (1 << xaxiemacif->axi_ethernet.Config.AxiFifoIntr)
			| (1 << xaxiemacif->axi_ethernet.Config.TemacIntr);
		/* set new mask */
		XIntc_EnableIntr(xtopologyp->intc_baseaddr, cur_mask);
	} while (0);
#else
#if XPAR_INTC_0_HAS_FAST == 1
	/* Register temac interrupt with interrupt controller */
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XFastInterruptHandler)xaxiemac_fasterror_handler);
	/* connect & enable FIFO interrupt */
	XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
			(XFastInterruptHandler)xllfifo_fastintr_handler);
#else
	/* Register temac interrupt with interrupt controller */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.TemacIntr,
			(XInterruptHandler)xaxiemac_error_handler,
			&xaxiemacif->axi_ethernet);
	/* connect & enable FIFO interrupt */
	XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
			xaxiemacif->axi_ethernet.Config.AxiFifoIntr,
			(XInterruptHandler)xllfifo_intr_handler,
			xemac);
#endif
	/* Enable EMAC interrupts in the interrupt controller */
	do {
		/* read current interrupt enable mask */
		unsigned int cur_mask = XIntc_In32(xtopologyp->intc_baseaddr + XIN_IER_OFFSET);
		/* form new mask enabling SDMA & ll_temac interrupts */
		cur_mask = cur_mask
			| (1 << xaxiemacif->axi_ethernet.Config.AxiFifoIntr)
			| (1 << xaxiemacif->axi_ethernet.Config.TemacIntr);
		/* set new mask */
		XIntc_EnableIntr(xtopologyp->intc_baseaddr, cur_mask);
	} while (0);
#endif
#endif
	return 0;
}
/* Transmit a (possibly chained) pbuf through the AXI-Stream FIFO.
 * Copies every segment into the TX FIFO, then programs the total length
 * to start transmission. Always returns 0. */
XStatus axififo_send(xaxiemacif_s *xaxiemacif, struct pbuf *p)
{
	XLlFifo *fifo = &xaxiemacif->axififo;
	u32_t total_len = 0;
	struct pbuf *seg = p;

	/* Walk the pbuf chain, pushing each segment into the FIFO. */
	while (seg != NULL) {
		XLlFifo_Write(fifo, seg->payload, seg->len);
		total_len += seg->len;
		seg = seg->next;
	}
	/* Writing the length register kicks off the transmit. */
	XLlFifo_TxSetLen(fifo, total_len);
	return 0;
}
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Fast Error Handler ********************************************/
/* Fast ISRs take no argument; the MAC instance comes from the
 * xaxiemacif_fast global published in init_axi_fifo(). */
void xaxiemac_fasterror_handler(void)
{
	xaxiemac_error_handler(&xaxiemacif_fast->axi_ethernet);
}
/********** Fast Interrupt handler *****************************************/
/* Forwards to the common FIFO ISR via the xemac_fast global. */
void xllfifo_fastintr_handler(void)
{
	xllfifo_intr_handler(xemac_fast);
}
#endif

View File

@ -1,50 +0,0 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XAXIEMACIF_FIFO_H_
#define __XAXIEMACIF_FIFO_H_
#include "xparameters.h"
#include "netif/xaxiemacif.h"
#include "xlwipconfig.h"
#ifdef __cplusplus
extern "C" {
#endif
XStatus init_axi_fifo(struct xemac_s *xemac);
XStatus axififo_send(xaxiemacif_s *xaxiemacif, struct pbuf *p);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,123 +0,0 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "netif/xaxiemacif.h"
#include "lwipopts.h"
/* Look up the generated XAxiEthernet configuration entry whose base address
 * matches mac_base. Returns NULL when no instance matches. */
XAxiEthernet_Config *xaxiemac_lookup_config(unsigned mac_base)
{
	extern XAxiEthernet_Config XAxiEthernet_ConfigTable[];
	unsigned int idx;

	/* Linear scan of the tool-generated config table. */
	for (idx = 0; idx < XPAR_XAXIETHERNET_NUM_INSTANCES; idx++) {
		if (XAxiEthernet_ConfigTable[idx].BaseAddress == mac_base) {
			return &XAxiEthernet_ConfigTable[idx];
		}
	}
	return NULL;
}
/* Bring up an AXI Ethernet MAC: reset it, program its options (flow control,
 * TX/RX enable, FCS strip, multicast, optionally jumbo frames), set the MAC
 * address, negotiate the link speed through the PHY, record link status,
 * then start the MAC and enable receive-error interrupts.
 *
 * @param xaxiemac adapter state wrapping the XAxiEthernet instance.
 * @param netif    lwIP netif providing the hardware (MAC) address.
 */
void init_axiemac(xaxiemacif_s *xaxiemac, struct netif *netif)
{
	unsigned link_speed = 1000;
	unsigned options;
	XAxiEthernet *xaxiemacp;
	xaxiemacp = &xaxiemac->axi_ethernet;
	XAxiEthernet_Reset(xaxiemacp);
	options = XAxiEthernet_GetOptions(xaxiemacp);
	options |= XAE_FLOW_CONTROL_OPTION;
#ifdef USE_JUMBO_FRAMES
	options |= XAE_JUMBO_OPTION;
#endif
	options |= XAE_TRANSMITTER_ENABLE_OPTION;
	options |= XAE_RECEIVER_ENABLE_OPTION;
	options |= XAE_FCS_STRIP_OPTION;
	options |= XAE_MULTICAST_OPTION;
	/* Enable the chosen options and clear every option not chosen. */
	XAxiEthernet_SetOptions(xaxiemacp, options);
	XAxiEthernet_ClearOptions(xaxiemacp, ~options);
	/* set mac address */
	XAxiEthernet_SetMacAddress(xaxiemacp, (unsigned char*)(netif->hwaddr));
	/* PHY negotiation returns the actual link speed (0 == no link). */
	link_speed = phy_setup_axiemac(xaxiemacp);
	XAxiEthernet_SetOperatingSpeed(xaxiemacp, link_speed);
	if (link_speed == 0)
		xaxiemac->eth_link_status = ETH_LINK_DOWN;
	else
		xaxiemac->eth_link_status = ETH_LINK_UP;
	/* Setting the operating speed of the MAC needs a delay. */
	{
		volatile int wait;
		for (wait=0; wait < 100000; wait++);
		for (wait=0; wait < 100000; wait++);
	}
#ifdef NOTNOW
	/* in a soft temac implementation, we need to explicitly make sure that
	 * the RX DCM has been locked. See xps_ll_temac manual for details.
	 * This bit is guaranteed to be 1 for hard temac's
	 */
	lock_message_printed = 0;
	/* Fix: 'first' was declared inside the while loop, resetting to 1 on
	 * every iteration so the "print once" logic never worked; hoisted out.
	 * Fix: NETIF_DEBUGF is not a defined macro — the debug-level constant
	 * is NETIF_DEBUG. (This block is compiled out unless NOTNOW is
	 * defined, so runtime behavior is unchanged.) */
	{
		int first = 1;
		while (!(XAxiEthernet_ReadReg(xaxiemacp->Config.BaseAddress, XAE_IS_OFFSET)
					& XAE_INT_RXDCMLOCK_MASK)) {
			if (first) {
				LWIP_DEBUGF(NETIF_DEBUG, ("Waiting for RX DCM to lock.."));
				first = 0;
				lock_message_printed = 1;
			}
		}
	}
	if (lock_message_printed)
		LWIP_DEBUGF(NETIF_DEBUG, ("RX DCM locked.\r\n"));
#endif
	/* start the temac */
	XAxiEthernet_Start(xaxiemacp);
	/* enable MAC interrupts */
	XAxiEthernet_IntEnable(xaxiemacp, XAE_INT_RECV_ERROR_MASK);
}
/* MAC error-interrupt handler: read whatever error causes are pending on
 * the TEMAC and acknowledge them. No recovery beyond clearing is attempted. */
void xaxiemac_error_handler(XAxiEthernet * Temac)
{
	unsigned pending_mask = XAxiEthernet_IntPending(Temac);
	XAxiEthernet_IntClear(Temac, pending_mask);
}

View File

@ -1,50 +0,0 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef __XAXIEMACIF_HW_H_
#define __XAXIEMACIF_HW_H_
#include "netif/xaxiemacif.h"
#include "lwip/netif.h"
#ifdef __cplusplus
extern "C" {
#endif
XAxiEthernet_Config * xaxiemac_lookup_config(unsigned mac_base);
void init_axiemac(xaxiemacif_s *xaxiemacif, struct netif *netif);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,839 +0,0 @@
/*
* Copyright (C) 2018 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "lwip/sys.h"
#endif
#include "lwip/stats.h"
#include "lwip/inet_chksum.h"
#include "netif/xadapter.h"
#include "netif/xaxiemacif.h"
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
#include "xscugic.h"
#endif
#include "xstatus.h"
#include "xlwipconfig.h"
#include "xparameters.h"
#if defined __aarch64__
#include "xil_mmu.h"
#endif
#if defined ARMR5
#include "xil_mpu.h"
#endif
#define PARTIAL_CSUM_ENABLE 0x00000001 /* Option for partial csum enable */
#define FULL_CSUM_ENABLE 0x00000002 /* Option for full csum enable */
#define BD_USR0_OFFSET 0 /* AXI4-Stream Control Word offset from
* the start of app user words in BD.
* Offset 0 means, Control Word 0, used
* for enabling checksum offloading.
*/
#define BD_USR1_OFFSET 1 /* AXI4-Stream Control Word offset from
* the start of app user words in BD.
* Offset means, Control Word 1, used
* for mentioning checksum begin and
* checksum insert points
*/
#define BD_USR2_OFFSET 2 /* AXI4-Stream Control Word offset from
* the start of app user words in BD.
* Offset 2 means, Control Word 2, used
* for mentioning checksum seed.
*/
#define XMCDMA_ALL_BDS 0xFFFF
#define XMCDMA_BD_LENGTH_MASK 0x007FFFFF
#define XMCDMA_COALESCEDELAY 0x1
#define RESET_TIMEOUT_COUNT 10000
#define BLOCK_SIZE_2MB 0x200000
#define BLOCK_SIZE_1MB 0x100000
#if defined (__aarch64__)
#define BD_SIZE BLOCK_SIZE_2MB
static u8_t bd_space[BD_SIZE] __attribute__ ((aligned (BLOCK_SIZE_2MB)));
#else
#define BD_SIZE BLOCK_SIZE_1MB
static u8_t bd_space[BD_SIZE] __attribute__ ((aligned (BLOCK_SIZE_1MB)));
#endif
static u8_t *bd_mem_ptr = bd_space;
#if !NO_SYS
extern u32 xInsideISR;
#endif
/* Mark a TX BD for partial (TCP/UDP) checksum offload via app word 0. */
static inline void bd_csum_enable(XMcdma_Bd *bd)
{
	XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, PARTIAL_CSUM_ENABLE);
}
/* Disable partial checksum offload on a TX BD.
 * NOTE(review): this writes ~PARTIAL_CSUM_ENABLE (0xFFFFFFFE) into app
 * word 0 rather than clearing only the enable bit, which also sets every
 * other bit, including FULL_CSUM_ENABLE. Matches upstream Xilinx code —
 * confirm against the MCDMA BD app-word specification. */
static inline void bd_csum_disable(XMcdma_Bd *bd)
{
	XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, ~PARTIAL_CSUM_ENABLE);
}
/* Disable full checksum offload on a TX BD.
 * NOTE(review): like bd_csum_disable, this writes the complement of the
 * enable constant (sets all other bits) instead of clearing one bit —
 * verify against the hardware spec. */
static inline void bd_fullcsum_disable(XMcdma_Bd *bd)
{
	XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, ~FULL_CSUM_ENABLE);
}
/* Mark a TX BD for full checksum offload via app word 0. */
static inline void bd_fullcsum_enable(XMcdma_Bd *bd)
{
	XMcDma_BdSetAppWord(bd, BD_USR0_OFFSET, FULL_CSUM_ENABLE);
}
/* Program a TX BD for partial checksum offload: enable the feature, then
 * store the checksum begin offset + insert offset (app word 1) and the
 * initial/seed checksum value (app word 2). */
static inline void bd_csum_set(XMcdma_Bd *bd, u16_t tx_csbegin,
			       u16_t tx_csinsert, u16_t tx_csinit)
{
	u32_t app1;
	bd_csum_enable(bd);
	/* write start offset and insert offset into BD */
	app1 = ((u32_t)tx_csbegin << 16) | tx_csinsert;
	XMcDma_BdSetAppWord(bd, BD_USR1_OFFSET, app1);
	/* insert init value */
	XMcDma_BdSetAppWord(bd, BD_USR2_OFFSET, tx_csinit);
}
/* Length in bytes of the frame described by an RX BD (masked to the
 * 23-bit length field). */
static inline u32_t extract_packet_len(XMcdma_Bd *rxbd) {
	return XMcDma_BdGetActualLength(rxbd, XMCDMA_BD_LENGTH_MASK);
}
/* Hardware-computed 16-bit checksum stored in RX BD app word 3. */
static inline u16_t extract_csum(XMcdma_Bd *rxbd) {
	return XMcdma_BdRead64(rxbd, XMCDMA_BD_USR3_OFFSET) & 0xffff;
}
/* One's-complement checksum arithmetic step: add v into csum and fold the
 * end-around carry back in (carry detected via unsigned wrap-around). */
static inline u32_t csum_sub(u32_t csum, u16_t v)
{
	u32_t sum = csum + (u32_t)v;
	if (sum < (u32_t)v) {
		sum += 1; /* end-around carry */
	}
	return sum;
}
/*
 * compare if the h/w computed checksum (stored in the rxbd)
 * equals the TCP checksum value in the packet
 *
 * Returns non-zero when the checksum is considered valid. Non-TCP/IP
 * packets always return 1 so the stack performs its own verification.
 */
s32_t is_checksum_valid(XMcdma_Bd *rxbd, struct pbuf *p)
{
	struct ethip_hdr *ehdr = p->payload;
	u8_t proto = IPH_PROTO(&ehdr->ip);
	/* check if it is a TCP packet */
	if (htons(ehdr->eth.type) == ETHTYPE_IP && proto == IP_PROTO_TCP) {
		u32_t iphdr_len;
		u16_t csum_in_rxbd, pseudo_csum, iphdr_csum, padding_csum;
		u16_t tcp_payload_offset;
		u32_t computed_csum;
		u16_t padding_len, tcp_payload_len, packet_len;
		u16_t csum;
		/* determine length of IP header */
		iphdr_len = (IPH_HL(&ehdr->ip) * 4);
		tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
		tcp_payload_len = htons(IPH_LEN(&ehdr->ip)) -
			IPH_HL(&ehdr->ip) * 4;
		packet_len = extract_packet_len(rxbd);
		padding_len = packet_len - tcp_payload_offset - tcp_payload_len;
		csum_in_rxbd = extract_csum(rxbd);
		/* pseudo-header checksum over src/dst addresses, proto and
		 * TCP segment length */
		pseudo_csum = htons(ip_chksum_pseudo(NULL, proto,
					tcp_payload_len,
					(ip_addr_t *)&ehdr->ip.src,
					(ip_addr_t *)&ehdr->ip.dest));
		/* xps_ll_temac computes the checksum of the packet starting
		 * at byte XAE_HDR_SIZE we need to subtract the values of
		 * the ethernet & IP headers
		 */
		iphdr_csum = inet_chksum(p->payload + XAE_HDR_SIZE, iphdr_len);
		/* compute csum of padding bytes, if any */
		padding_csum = inet_chksum(p->payload + p->tot_len -
				padding_len, padding_len);
		/* get the h/w checksum value */
		computed_csum = (u32_t)csum_in_rxbd;
		/* remove the effect of csumming the iphdr */
		computed_csum = csum_sub(computed_csum, ~iphdr_csum);
		/* add in the pseudo csum */
		computed_csum = csum_sub(computed_csum, ~pseudo_csum);
		/* remove any padding effect */
		computed_csum = csum_sub(computed_csum, ~padding_csum);
		/* normalize computed csum by folding carries back into the
		 * low 16 bits until none remain */
		while (computed_csum >> 16) {
			computed_csum = (computed_csum & 0xffff) +
				(computed_csum >> 16);
		}
		/* convert to 16 bits and take 1's complement */
		csum = (u16_t)computed_csum;
		csum = ~csum;
		/* chksum is valid if: computed csum over the packet is 0 */
		return !csum;
	} else {
		/* just say yes to all other packets */
		/* the upper layers in the stack will compute and
		 * verify the checksum */
		return 1;
	}
}
/* Bytes needed for NumBd descriptors, with each BD's size rounded up to
 * 'Alignment'. */
#define XMcdma_BdMemCalc(Alignment, NumBd) \
	(int)((sizeof(XMcdma_Bd)+((Alignment)-1)) & ~((Alignment)-1))*(NumBd)
/* Carve an aligned region for n_desc BDs out of the static bd_space pool.
 * Returns NULL (after a debug print) when the pool is exhausted.
 * NOTE(review): bump-only allocator — there is no way to return BD space.
 * Arithmetic on void* relies on the GCC extension (sizeof(void) == 1). */
static inline void *alloc_bdspace(int n_desc, u32 alignment)
{
	int space = XMcdma_BdMemCalc(alignment, n_desc);
	void *unaligned_mem = bd_mem_ptr;
	void *aligned_mem =
		(void *)(((UINTPTR)(unaligned_mem + alignment - 1)) & ~(alignment - 1));
	if (aligned_mem + space > (void *)(bd_space + BD_SIZE)) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Unable to allocate BD space\r\n"));
		return NULL;
	}
	bd_mem_ptr = aligned_mem + space;
	return aligned_mem;
}
/* TX-channel error ISR: log the failing channel/mask, then reset the whole
 * MCDMA engine and poll (bounded by RESET_TIMEOUT_COUNT) until the reset
 * completes, logging a timeout if it never does. */
static void axi_mcdma_send_error_handler(void *CallBackRef, u32 ChanId, u32 Mask)
{
	XMcdma *dma = (XMcdma *)((void *)CallBackRef);
	u32 retries_left;
#if !NO_SYS
	xInsideISR++;
#endif
	LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma error interrupt is asserted, Chan_id = "
		    "%d, Mask = %d\r\n", __FUNCTION__, ChanId, Mask));
	XMcDma_Reset(dma);
	/* Bounded busy-wait for the engine reset to finish. */
	retries_left = RESET_TIMEOUT_COUNT;
	while (retries_left && !XMcdma_ResetIsDone(dma)) {
		retries_left -= 1;
	}
	if (!retries_left) {
		LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma reset timed out\r\n", __func__));
	}
#if !NO_SYS
	xInsideISR--;
#endif
}
/* TX-completion ISR for one MCDMA channel: reclaim the finished BDs (and
 * free their pbufs) via process_sent_bds. */
static void axi_mcdma_send_handler(void *CallBackRef, u32 ChanId)
{
	XMcdma *dma = (XMcdma *)((void *)CallBackRef);
	XMcdma_ChanCtrl *tx_chan;
#if !NO_SYS
	xInsideISR++;
#endif
	tx_chan = XMcdma_GetMcdmaTxChan(dma, ChanId);
	process_sent_bds(tx_chan);
#if !NO_SYS
	xInsideISR--;
#endif
}
/* Refill up to n_bds receive descriptors: allocate a max-frame-sized pbuf
 * for each, attach it to the next free BD, prepare the BD status/control
 * fields and cache state, then commit the whole batch to hardware.
 * Stops early (without committing the batch) if a pbuf or BD cannot be
 * obtained. */
static void setup_rx_bds(XMcdma_ChanCtrl *Rx_Chan, u32_t n_bds)
{
	XMcdma_Bd *rxbd;
	u32_t i = 0;
	XStatus status;
	struct pbuf *p;
	u32 bdsts;
#ifdef USE_JUMBO_FRAMES
	u32 max_frame_size = XAE_MAX_JUMBO_FRAME_SIZE + IEEE_1588_PAD_SIZE;
#else
	u32 max_frame_size = XAE_MAX_FRAME_SIZE + IEEE_1588_PAD_SIZE;
#endif
	for (i = 0; i < n_bds; i++) {
		p = pbuf_alloc(PBUF_RAW, max_frame_size, PBUF_POOL);
		if (!p) {
			LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in recv_handler\r\n"));
			return;
		}
		rxbd = (XMcdma_Bd *)XMcdma_GetChanCurBd(Rx_Chan);
		/* hand the pbuf's buffer to the DMA channel */
		status = XMcDma_ChanSubmit(Rx_Chan, (UINTPTR)p->payload,
					p->len);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
			pbuf_free(p);
			return;
		}
		/* Clear everything but the COMPLETE bit, which is cleared when
		 * committed to hardware.
		 */
		bdsts = XMcDma_BdGetSts(rxbd);
		bdsts &= XMCDMA_BD_STS_COMPLETE_MASK;
		XMcdma_BdWrite(rxbd, XMCDMA_BD_STS_OFFSET, bdsts);
		XMcDma_BdSetCtrl(rxbd, 0);
		/* remember which pbuf backs this BD for the recv handler */
		XMcdma_BdSetSwId(rxbd, p);
		/* make the DMA-visible buffer coherent with the cache */
#if defined(__aarch64__)
		Xil_DCacheInvalidateRange((UINTPTR)p->payload,
					(UINTPTR)max_frame_size);
#else
		Xil_DCacheFlushRange((UINTPTR)p->payload,
				(UINTPTR)max_frame_size);
#endif
	}
	/* ensure all BD writes are visible before handing them to hardware */
#if !defined (__MICROBLAZE__) && !defined (__riscv)
	dsb();
#endif
	if (n_bds) {
		/* Enqueue to HW */
		status = XMcDma_ChanToHw(Rx_Chan);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware\n\r"));
		}
	}
}
/* RX-channel error ISR: log the error, replenish the channel's RX BD ring,
 * reset the MCDMA engine (bounded poll for completion), then re-commit the
 * RX channel to hardware. */
static void axi_mcdma_recv_error_handler(void *CallBackRef, u32 ChanId)
{
	u32 timeOut;
	XMcdma_ChanCtrl *Rx_Chan;
	struct xemac_s *xemac = (struct xemac_s *)(CallBackRef);
	xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
	XMcdma *McDmaInstPtr = &xaxiemacif->aximcdma;
#if !NO_SYS
	xInsideISR++;
#endif
	LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma error interrupt is asserted\r\n",
			__FUNCTION__));
	Rx_Chan = XMcdma_GetMcdmaRxChan(McDmaInstPtr, ChanId);
	/* refill all free BDs before resetting the engine */
	setup_rx_bds(Rx_Chan, Rx_Chan->BdCnt);
	XMcDma_Reset(McDmaInstPtr);
	/* bounded busy-wait for the reset to complete */
	timeOut = RESET_TIMEOUT_COUNT;
	while (timeOut) {
		if (XMcdma_ResetIsDone(McDmaInstPtr))
			break;
		timeOut -= 1;
	}
	if (!timeOut) {
		LWIP_DEBUGF(NETIF_DEBUG, ("%s: Error: aximcdma reset timed out\r\n", __func__));
	}
	XMcDma_ChanToHw(Rx_Chan);
#if !NO_SYS
	xInsideISR--;
#endif
	return;
}
/* RX-completion ISR for one MCDMA channel.
 *
 * Harvests every BD the hardware has completed: retrieves the pbuf attached
 * to the BD, trims it to the received byte count (with a cache invalidate on
 * non-aarch64), optionally verifies the offloaded checksum, and enqueues the
 * pbuf on the adapter's receive queue (dropped on overflow). Finally frees
 * the harvested BDs, refills the ring, and wakes the RX thread when running
 * under an OS. */
static void axi_mcdma_recv_handler(void *CallBackRef, u32 ChanId)
{
	struct pbuf *p;
	u32 i, rx_bytes, ProcessedBdCnt;
	XMcdma_Bd *rxbd, *rxbdset;
	struct xemac_s *xemac = (struct xemac_s *)(CallBackRef);
	xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
	XMcdma *McDmaInstPtr = &xaxiemacif->aximcdma;
	XMcdma_ChanCtrl *Rx_Chan;
#if !NO_SYS
	xInsideISR++;
#endif
	Rx_Chan = XMcdma_GetMcdmaRxChan(McDmaInstPtr, ChanId);
	ProcessedBdCnt = XMcdma_BdChainFromHW(Rx_Chan, XMCDMA_ALL_BDS, &rxbdset);
	for (i = 0, rxbd = rxbdset; i < ProcessedBdCnt; i++) {
		p = (struct pbuf *)(UINTPTR)XMcdma_BdGetSwId(rxbd);
		/* Adjust the buffer size to actual number of bytes received.*/
		rx_bytes = extract_packet_len(rxbd);
#ifndef __aarch64__
		Xil_DCacheInvalidateRange((UINTPTR)p->payload,
				(UINTPTR)rx_bytes);
#endif
		pbuf_realloc(p, rx_bytes);
#if LWIP_PARTIAL_CSUM_OFFLOAD_RX==1
		/* Verify for partial checksum offload case */
		if (!is_checksum_valid(rxbd, p)) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Incorrect csum as calculated by the hw\r\n"));
		}
#endif
		/* store it in the receive queue,
		 * where it'll be processed by a different handler
		 */
		if (pq_enqueue(xaxiemacif->recv_q, (void*)p) < 0) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			pbuf_free(p);
		}
		rxbd = (XMcdma_Bd *)XMcdma_BdChainNextBd(Rx_Chan, rxbd);
	}
	/* free up the BD's */
	XMcdma_BdChainFree(Rx_Chan, ProcessedBdCnt, rxbdset);
	/* return all the processed bd's back to the stack */
	setup_rx_bds(Rx_Chan, Rx_Chan->BdCnt);
#if !NO_SYS
	sys_sem_signal(&xemac->sem_rx_data_available);
	xInsideISR--;
#endif
}
/* Scan every configured MCDMA TX channel and return the free-BD count of
 * the first channel that has any; returns 0 when all channels are full. */
s32_t xaxiemac_is_tx_space_available(xaxiemacif_s *xaxiemacif)
{
	u8_t chan;
	for (chan = 1;
	     chan <= xaxiemacif->axi_ethernet.Config.AxiMcDmaChan_Cnt;
	     chan++) {
		XMcdma_ChanCtrl *tx_chan =
			XMcdma_GetMcdmaTxChan(&xaxiemacif->aximcdma, chan);
		if (tx_chan->BdCnt) {
			return tx_chan->BdCnt;
		}
	}
	return 0;
}
/* Reclaim all TX BDs the hardware has finished with on one channel:
 * free the pbuf attached to each completed descriptor, then return the
 * descriptors to the free pool. Returns XST_SUCCESS, or XST_FAILURE when
 * nothing was completed or the BD chain could not be freed. */
s32_t process_sent_bds(XMcdma_ChanCtrl *Tx_Chan)
{
	XMcdma_Bd *completed_set, *cur_bd;
	int completed_cnt, idx;
	XStatus free_status;

	/* Pull the chain of completed descriptors back from hardware. */
	completed_cnt = XMcdma_BdChainFromHW(Tx_Chan, XMCDMA_ALL_BDS,
					&completed_set);
	if (completed_cnt == 0) {
		return XST_FAILURE;
	}
	/* Release the pbuf referenced by every completed descriptor. */
	cur_bd = completed_set;
	for (idx = 0; idx < completed_cnt; idx++) {
		struct pbuf *p = (struct pbuf *)(UINTPTR)XMcdma_BdGetSwId(cur_bd);
		pbuf_free(p);
		cur_bd = (XMcdma_Bd *)XMcdma_BdChainNextBd(Tx_Chan, cur_bd);
	}
	/* Hand the descriptors back to the channel's free pool. */
	free_status = XMcdma_BdChainFree(Tx_Chan, completed_cnt, completed_set);
	if (free_status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error freeing up TxBDs"));
		return XST_FAILURE;
	}
	return XST_SUCCESS;
}
#if LWIP_PARTIAL_CSUM_OFFLOAD_TX==1
/* Configure a TX BD for partial checksum offload of an outgoing TCP/IP
 * frame: compute the pseudo-header checksum as the seed and program the
 * checksum begin/insert offsets into the descriptor. Non-TCP frames (and
 * frames too short to carry an eth+IP header) are left untouched.
 * NOTE(review): the outer condition requires proto == IP_PROTO_TCP, so the
 * UDP branch below (proto == IP_PROTO_UDP) is unreachable — UDP frames
 * never get checksum offload here. Matches upstream code; confirm intent. */
static void update_partial_cksum_offload(XMcdma_Bd *txbdset, struct pbuf *p)
{
	if (p->len > sizeof(struct ethip_hdr)) {
		struct ethip_hdr *ehdr = p->payload;
		u8_t proto = IPH_PROTO(&ehdr->ip);
		/* check if it is a TCP packet */
		if (htons(ehdr->eth.type) == ETHTYPE_IP && proto ==
				IP_PROTO_TCP) {
			u32_t iphdr_len, csum_insert_offset;
			u16_t tcp_len; /* TCP header length + data length in bytes */
			u16_t csum_init = 0;
			u16_t tcp_payload_offset;
			/* determine length of IP header */
			iphdr_len = (IPH_HL(&ehdr->ip) * 4);
			tcp_payload_offset = XAE_HDR_SIZE + iphdr_len;
			tcp_len = p->tot_len - tcp_payload_offset;
			/* insert checksum at offset 16 for TCP, 6 for UDP */
			if (proto == IP_PROTO_TCP)
				csum_insert_offset = tcp_payload_offset + 16;
			else if (proto == IP_PROTO_UDP)
				csum_insert_offset = tcp_payload_offset + 6;
			else
				csum_insert_offset = 0;
			/* compute pseudo header checksum value */
			csum_init = ip_chksum_pseudo(NULL, proto, tcp_len,
					(ip_addr_t *)&ehdr->ip.src,
					(ip_addr_t *)&ehdr->ip.dest);
			/* init buffer descriptor */
			bd_csum_set(txbdset, tcp_payload_offset,
					csum_insert_offset, htons(~csum_init));
		}
	}
}
#endif
/*
 * Queue a pbuf chain for transmission via scatter-gather MCDMA.
 *
 * TX channels are selected round-robin across calls (the static ChanId
 * keeps the position); a channel is accepted only if it has enough free
 * BDs for the whole chain. Each pbuf becomes one BD, SOF/EOF flags are set
 * on the first/last BD, optional checksum offload is configured, and the
 * chain is handed to hardware.
 *
 * @param xaxiemacif adapter state holding the MCDMA instance
 * @param p          pbuf chain to transmit (a reference is taken per pbuf;
 *                   released in process_sent_bds after completion)
 * @return XST_SUCCESS-style status from XMcDma_ChanToHw, ERR_IF when no
 *         channel has room, XST_FAILURE on submit failure
 */
XStatus axi_mcdma_sgsend(xaxiemacif_s *xaxiemacif, struct pbuf *p)
{
struct pbuf *q;
u32_t n_pbufs = 0;
XMcdma_Bd *txbdset, *txbd, *last_txbd = NULL;
XMcdma_ChanCtrl *Tx_Chan;
XStatus status;
/* static: round-robin position persists across calls (not thread-safe;
 * NOTE(review): assumes a single TX path — confirm locking at caller) */
static u8_t ChanId = 1;
u8_t next_ChanId = ChanId;
/* first count the number of pbufs */
for (q = p; q != NULL; q = q->next)
n_pbufs++;
/* Transfer packets to TX DMA Channels in round-robin manner */
do {
Tx_Chan = XMcdma_GetMcdmaTxChan(&xaxiemacif->aximcdma, ChanId);
if (++ChanId > xaxiemacif->axi_ethernet.Config.AxiMcDmaChan_Cnt)
ChanId = 1;
/* A full wrap back to the starting channel means nobody has room. */
if ((next_ChanId == ChanId) && (n_pbufs > Tx_Chan->BdCnt)) {
LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error, not enough BD space in All Chans\r\n"));
return ERR_IF;
}
} while (n_pbufs > Tx_Chan->BdCnt);
txbdset = (XMcdma_Bd *)XMcdma_GetChanCurBd(Tx_Chan);
for (q = p, txbd = txbdset; q != NULL; q = q->next) {
/* Send the data from the pbuf to the interface, one pbuf at a
 * time. The size of the data in each pbuf is kept in the ->len
 * variable.
 */
XMcDma_BdSetCtrl(txbd, 0);
/* Remember the pbuf in the BD so the TX-done path can free it. */
XMcdma_BdSetSwId(txbd, (void *)q);
/* Flush payload to memory before DMA reads it. */
Xil_DCacheFlushRange((UINTPTR)q->payload, q->len);
status = XMcDma_ChanSubmit(Tx_Chan, (UINTPTR)q->payload,
q->len);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("ChanSubmit failed\n\r"));
return XST_FAILURE;
}
/* Keep the pbuf alive until the descriptor completes. */
pbuf_ref(q);
last_txbd = txbd;
txbd = (XMcdma_Bd *)XMcdma_BdChainNextBd(Tx_Chan, txbd);
}
if (n_pbufs == 1) {
XMcDma_BdSetCtrl(txbdset, XMCDMA_BD_CTRL_SOF_MASK
| XMCDMA_BD_CTRL_EOF_MASK);
} else {
/* in the first packet, set the SOP */
XMcDma_BdSetCtrl(txbdset, XMCDMA_BD_CTRL_SOF_MASK);
/* in the last packet, set the EOP */
XMcDma_BdSetCtrl(last_txbd, XMCDMA_BD_CTRL_EOF_MASK);
}
#if LWIP_FULL_CSUM_OFFLOAD_TX==1
bd_fullcsum_disable(txbdset);
if (p->len > sizeof(struct ethip_hdr)) {
bd_fullcsum_enable(txbdset);
}
#endif
#if LWIP_PARTIAL_CSUM_OFFLOAD_TX==1
bd_csum_disable(txbdset);
update_partial_cksum_offload(txbdset, p);
#endif
/* Ensure BD writes are visible before kicking the hardware. */
DATA_SYNC;
/* enq to h/w */
return XMcDma_ChanToHw(Tx_Chan);
}
/*
 * Register and enable the GIC interrupt handlers for one MCDMA channel.
 *
 * Hooks up the TEMAC error handler plus the RX/TX interrupt handlers of
 * the given channel, sets their priorities/trigger types, and unmasks them
 * in the distributor. Only compiled in for the Zynq GIC configuration.
 *
 * @param xemac  lwIP adapter descriptor (carries state and topology index)
 * @param ChanId 1-based MCDMA channel number
 */
void axi_mcdma_register_handlers(struct xemac_s *xemac, u8 ChanId)
{
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
XMcdma *McDmaInstPtr = &xaxiemacif->aximcdma;
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#if XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ == 1
/* Attach handlers: TEMAC error, per-channel RX done, per-channel TX done. */
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.TemacIntr,
(Xil_InterruptHandler)xaxiemac_error_handler,
&xaxiemacif->axi_ethernet);
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.AxiMcDmaRxIntr[ChanId - 1],
(Xil_InterruptHandler)XMcdma_IntrHandler,
McDmaInstPtr);
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xaxiemacif->axi_ethernet.Config.AxiMcDmaTxIntr[ChanId - 1],
(Xil_InterruptHandler)XMcdma_TxIntrHandler,
McDmaInstPtr);
/* Program priority and rising-edge trigger for all three interrupts. */
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr,
AXIETH_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaTxIntr[ChanId - 1],
AXIDMA_TX_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaRxIntr[ChanId - 1],
AXIDMA_RX_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
/* Finally unmask the interrupts in the GIC distributor. */
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.TemacIntr);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaTxIntr[ChanId - 1]);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xaxiemacif->axi_ethernet.Config.AxiMcDmaRxIntr[ChanId - 1]);
#endif /* XLWIP_CONFIG_INCLUDE_AXIETH_ON_ZYNQ */
}
/*
 * Initialize one MCDMA RX channel: create its BD ring, register RX
 * callbacks, configure interrupt coalescing, pre-post RX buffers, and
 * enable the channel's interrupts.
 *
 * Advances xaxiemacif->rx_bdspace past the ring just created so the next
 * channel gets the following slice of BD memory.
 *
 * @param xemac  lwIP adapter descriptor
 * @param ChanId 1-based RX channel number
 * @return XST_SUCCESS on success; XST_FAILURE / ERR_IF on setup errors
 */
XStatus axi_mcdma_setup_rx_chan(struct xemac_s *xemac, u32_t ChanId)
{
XMcdma_ChanCtrl *Rx_Chan;
XStatus status;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
/* RX chan configurations */
Rx_Chan = XMcdma_GetMcdmaRxChan(&xaxiemacif->aximcdma, ChanId);
/* Disable all interrupts */
XMcdma_IntrDisable(Rx_Chan, XMCDMA_IRQ_ALL_MASK);
status = XMcDma_ChanBdCreate(Rx_Chan, (UINTPTR) xaxiemacif->rx_bdspace,
XLWIP_CONFIG_N_RX_DESC);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Rx bd create failed with %d\r\n", status));
return XST_FAILURE;
}
/* Bump the cursor so the next channel's ring starts after this one. */
xaxiemacif->rx_bdspace += (XLWIP_CONFIG_N_RX_DESC * sizeof(XMcdma_Bd));
/* Setup Interrupt System and register callbacks */
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_HANDLER_DONE,
(void *)axi_mcdma_recv_handler, xemac);
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_HANDLER_ERROR,
(void *)axi_mcdma_recv_error_handler, xemac);
status = XMcdma_SetChanCoalesceDelay(Rx_Chan,
XLWIP_CONFIG_N_RX_COALESCE,
XMCDMA_COALESCEDELAY);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
return ERR_IF;
}
/* Pre-post receive buffers to every descriptor in the ring. */
setup_rx_bds(Rx_Chan, XLWIP_CONFIG_N_RX_DESC);
/* enable DMA interrupts */
XMcdma_IntrEnable(Rx_Chan, XMCDMA_IRQ_ALL_MASK);
return XST_SUCCESS;
}
/*
 * Initialize one MCDMA TX channel: create its BD ring, register TX
 * completion/error callbacks, configure interrupt coalescing, and enable
 * the channel's interrupts.
 *
 * Advances xaxiemacif->tx_bdspace past the ring just created so the next
 * channel gets the following slice of BD memory.
 *
 * @param xemac  lwIP adapter descriptor
 * @param ChanId 1-based TX channel number
 * @return XST_SUCCESS on success; XST_FAILURE / ERR_IF on setup errors
 */
XStatus axi_mcdma_setup_tx_chan(struct xemac_s *xemac, u8 ChanId)
{
XStatus status;
XMcdma_ChanCtrl *Tx_Chan;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
/* TX chan configurations */
Tx_Chan = XMcdma_GetMcdmaTxChan(&xaxiemacif->aximcdma, ChanId);
/* Mask everything while the ring is being built. */
XMcdma_IntrDisable(Tx_Chan, XMCDMA_IRQ_ALL_MASK);
status = XMcDma_ChanBdCreate(Tx_Chan, (UINTPTR) xaxiemacif->tx_bdspace,
XLWIP_CONFIG_N_TX_DESC);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("TX bd create failed with %d\r\n", status));
return XST_FAILURE;
}
/* Bump the cursor so the next channel's ring starts after this one. */
xaxiemacif->tx_bdspace += (XLWIP_CONFIG_N_TX_DESC * sizeof(XMcdma_Bd));
/* Setup Interrupt System and register callbacks */
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_TX_HANDLER_DONE,
(void *)axi_mcdma_send_handler, &xaxiemacif->aximcdma);
XMcdma_SetCallBack(&xaxiemacif->aximcdma, XMCDMA_TX_HANDLER_ERROR,
(void *)axi_mcdma_send_error_handler,
&xaxiemacif->aximcdma);
status = XMcdma_SetChanCoalesceDelay(Tx_Chan,
XLWIP_CONFIG_N_TX_COALESCE,
XMCDMA_COALESCEDELAY);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("Error setting coalescing settings\r\n"));
return ERR_IF;
}
XMcdma_IntrEnable(Tx_Chan, XMCDMA_IRQ_ALL_MASK);
return XST_SUCCESS;
}
/*
 * One-time initialization of the AXI MCDMA engine for this EMAC.
 *
 * Allocates and maps (uncached) the RX/TX BD memory, initializes the MCDMA
 * driver from its hardware config, then sets up every RX/TX channel pair
 * and registers its interrupt handlers.
 *
 * @param xemac lwIP adapter descriptor
 * @return XST_SUCCESS on success; ERR_IF on allocation failure;
 *         XST_FAILURE on driver/channel setup failure
 */
XStatus init_axi_mcdma(struct xemac_s *xemac)
{
XMcdma_Config *dmaconfig;
XStatus status;
u32_t ChanId;
xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state);
UINTPTR baseaddr;
/*
 * Disable L1 prefetch if the processor type is Cortex A53. It is
 * observed that the L1 prefetching for ARMv8 can cause issues while
 * dealing with cache memory on Rx path. On Rx path, the lwIP adapter
 * does a clean and invalidation of buffers (pbuf payload) before
 * allocating them to Rx BDs. However, there are chances that the
 * the same cache line may get prefetched by the time Rx data is
 * DMAed to the same buffer. In such cases, CPU fetches stale data from
 * cache memory instead of getting them from memory. To avoid such
 * scenarios L1 prefetch is being disabled for ARMv8. That can cause
 * a performance degradation in the range of 3-5%. In tests, it is
 * generally observed that this performance degradation is quite
 * insignificant to be really visible.
 */
#if defined __aarch64__
Xil_ConfigureL1Prefetch(0);
#endif
/* Carve out aligned BD memory for all RX channels, then all TX channels. */
xaxiemacif->rx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_RX_DESC *
(XMCDMA_MAX_CHAN_PER_DEVICE / 2),
XMCDMA_BD_MINIMUM_ALIGNMENT);
if (!xaxiemacif->rx_bdspace) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: Unable to allocate memory for "
"RX buffer descriptors", __FILE__, __LINE__));
return ERR_IF;
}
xaxiemacif->tx_bdspace = alloc_bdspace(XLWIP_CONFIG_N_TX_DESC *
(XMCDMA_MAX_CHAN_PER_DEVICE / 2),
XMCDMA_BD_MINIMUM_ALIGNMENT);
if (!xaxiemacif->tx_bdspace) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: Unable to allocate memory for "
"TX buffer descriptors", __FILE__, __LINE__));
return ERR_IF;
}
/* Mark the BD Region as uncacheable so CPU and DMA see the same data. */
#if defined(__aarch64__)
Xil_SetTlbAttributes((UINTPTR)bd_space,
NORM_NONCACHE | INNER_SHAREABLE);
#elif defined (ARMR5)
Xil_SetTlbAttributes((INTPTR)bd_space,
DEVICE_SHARED | PRIV_RW_USER_RW);
#else
Xil_SetTlbAttributes((INTPTR)bd_space, DEVICE_MEMORY);
#endif
LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n",
xaxiemacif->rx_bdspace));
LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n",
xaxiemacif->tx_bdspace));
/* Initialize MCDMA */
baseaddr = xaxiemacif->axi_ethernet.Config.AxiDevBaseAddress;
dmaconfig = XMcdma_LookupConfigBaseAddr(baseaddr);
/* BUGFIX: previously tested `!baseaddr` (always false after lookup) and
 * fell through into CfgInitialize with a possibly-NULL config; check the
 * lookup result itself and bail out. */
if (!dmaconfig) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: Lookup Config failed\r\n", __FILE__,
__LINE__));
return XST_FAILURE;
}
status = XMcDma_CfgInitialize(&xaxiemacif->aximcdma, dmaconfig);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: MCDMA config initialization failed\r\n", __FILE__, __LINE__));
return XST_FAILURE;
}
/* Setup Rx/Tx chan and Interrupts */
for (ChanId = 1;
ChanId <= xaxiemacif->axi_ethernet.Config.AxiMcDmaChan_Cnt;
ChanId++) {
status = axi_mcdma_setup_rx_chan(xemac, ChanId);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: MCDMA Rx chan setup failed\r\n", __FILE__, __LINE__));
return XST_FAILURE;
}
status = axi_mcdma_setup_tx_chan(xemac, ChanId);
if (status != XST_SUCCESS) {
LWIP_DEBUGF(NETIF_DEBUG, ("%s@%d: Error: MCDMA Tx chan setup failed\r\n", __FILE__, __LINE__));
return XST_FAILURE;
}
axi_mcdma_register_handlers(xemac, ChanId);
}
return XST_SUCCESS;
}

View File

@ -1,834 +0,0 @@
/*
* Copyright (C) 2010 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "netif/xaxiemacif.h"
#include "lwipopts.h"
#include "sleep.h"
#include "xemac_ieee_reg.h"
#define PHY_R0_ISOLATE 0x0400
#define PHY_DETECT_REG 1
#define PHY_IDENTIFIER_1_REG 2
#define PHY_IDENTIFIER_2_REG 3
#define PHY_DETECT_MASK 0x1808
#define PHY_MARVELL_IDENTIFIER 0x0141
#define PHY_TI_IDENTIFIER 0x2000
/* Marvel PHY flags */
#define MARVEL_PHY_IDENTIFIER 0x141
#define MARVEL_PHY_MODEL_NUM_MASK 0x3F0
#define MARVEL_PHY_88E1111_MODEL 0xC0
#define MARVEL_PHY_88E1116R_MODEL 0x240
#define PHY_88E1111_RGMII_RX_CLOCK_DELAYED_MASK 0x0080
/* TI PHY Flags */
#define TI_PHY_DETECT_MASK 0x796D
#define TI_PHY_IDENTIFIER 0x2000
#define TI_PHY_DP83867_MODEL 0xA231
#define DP83867_RGMII_CLOCK_DELAY_CTRL_MASK 0x0003
#define DP83867_RGMII_TX_CLOCK_DELAY_MASK 0x0030
#define DP83867_RGMII_RX_CLOCK_DELAY_MASK 0x0003
/* TI DP83867 PHY Registers */
#define DP83867_R32_RGMIICTL1 0x32
#define DP83867_R86_RGMIIDCTL 0x86
#define TI_PHY_REGCR 0xD
#define TI_PHY_ADDDR 0xE
#define TI_PHY_PHYCTRL 0x10
#define TI_PHY_CFGR2 0x14
#define TI_PHY_SGMIITYPE 0xD3
#define TI_PHY_CFGR2_SGMII_AUTONEG_EN 0x0080
#define TI_PHY_SGMIICLK_EN 0x4000
#define TI_PHY_REGCR_DEVAD_EN 0x001F
#define TI_PHY_REGCR_DEVAD_DATAEN 0x4000
#define TI_PHY_CFGR2_MASK 0x003F
#define TI_PHY_REGCFG4 0x31
#define TI_PHY_REGCR_DATA 0x401F
#define TI_PHY_CFG4RESVDBIT7 0x80
#define TI_PHY_CFG4RESVDBIT8 0x100
#define TI_PHY_CFG4_AUTONEG_TIMER 0x60
#define TI_PHY_CFG2_SPEEDOPT_10EN 0x0040
#define TI_PHY_CFG2_SGMII_AUTONEGEN 0x0080
#define TI_PHY_CFG2_SPEEDOPT_ENH 0x0100
#define TI_PHY_CFG2_SPEEDOPT_CNT 0x0800
#define TI_PHY_CFG2_SPEEDOPT_INTLOW 0x2000
#define TI_PHY_CR_SGMII_EN 0x0800
/* Loop counters to check for reset done
*/
#define RESET_TIMEOUT 0xFFFF
#define AUTO_NEG_TIMEOUT 0x00FFFFFF
#define IEEE_CTRL_RESET 0x9140
#define IEEE_CTRL_ISOLATE_DISABLE 0xFBFF
#define PHY_XILINX_PCS_PMA_ID1 0x0174
#define PHY_XILINX_PCS_PMA_ID2 0x0C00
#ifdef SDT
#define XPAR_AXIETHERNET_0_PHYADDR XPAR_XAXIETHERNET_0_PHYADDR
#define XPAR_AXIETHERNET_0_BASEADDR XPAR_XAXIETHERNET_0_BASEADDR
#endif
extern u32_t phyaddrforemac;
static void __attribute__ ((noinline)) AxiEthernetUtilPhyDelay(unsigned int Seconds);
/*
 * Scan the MDIO bus for an attached PHY and return its address.
 *
 * Probes addresses 31 down to 1 (address 0 is never probed by the loop),
 * accepting either a device whose BMSR matches PHY_DETECT_MASK or a Xilinx
 * PCS/PMA core identified by its ID registers. On success the global
 * phyaddrforemac is updated as a side effect.
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @return detected PHY address, or 0 when nothing was found (address 0 is
 *         then assumed)
 */
static int detect_phy(XAxiEthernet *xaxiemacp)
{
u16 phy_reg;
u16 phy_id;
u32 phy_addr;
for (phy_addr = 31; phy_addr > 0; phy_addr--) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_DETECT_REG,
&phy_reg);
/* 0xFFFF means no device answered at this address. */
if ((phy_reg != 0xFFFF) &&
((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
/* Found a valid PHY address */
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: PHY detected at address %d.\r\n", phy_addr));
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: PHY detected.\r\n"));
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG,
&phy_reg);
if ((phy_reg != PHY_MARVELL_IDENTIFIER) &&
(phy_reg != TI_PHY_IDENTIFIER)){
xil_printf("WARNING: Not a Marvell or TI Ethernet PHY. Please verify the initialization sequence\r\n");
}
phyaddrforemac = phy_addr;
return phy_addr;
}
/* Not a regular PHY; check for a Xilinx PCS/PMA core instead. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG,
&phy_id);
if (phy_id == PHY_XILINX_PCS_PMA_ID1) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG,
&phy_id);
if (phy_id == PHY_XILINX_PCS_PMA_ID2) {
/* Found a valid PHY address */
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: PHY detected at address %d.\r\n",
phy_addr));
phyaddrforemac = phy_addr;
return phy_addr;
}
}
}
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet detect_phy: No PHY detected. Assuming a PHY at address 0\r\n"));
/* default to zero */
return 0;
}
/*
 * Check whether the device at phy_addr is a Xilinx PCS/PMA core.
 *
 * Reads identifier register 1 and, only if it matches, identifier
 * register 2; both must carry the Xilinx PCS/PMA IDs.
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @param phy_addr  PHY address to probe
 * @return 1 when both ID words match the Xilinx PCS/PMA core, else 0
 */
static int isphy_pcspma(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
	u16 id_word;

	XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG,
			     &id_word);
	if (id_word != PHY_XILINX_PCS_PMA_ID1)
		return 0;

	XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG,
			     &id_word);
	return (id_word == PHY_XILINX_PCS_PMA_ID2) ? 1 : 0;
}
/*
 * Read an extended (MMD / indirect) PHY register via the Clause-22
 * REGCR/ADDAR access sequence. The four MDIO operations below must stay
 * in exactly this order: select device address, latch the register
 * number, switch to data mode, then read the data word.
 *
 * @param InstancePtr AXI Ethernet instance used for MDIO access
 * @param PhyAddress  PHY address on the MDIO bus
 * @param RegisterNum extended register number to read
 * @param PhyDataPtr  out: register contents
 */
void XAxiEthernet_PhyReadExtended(XAxiEthernet *InstancePtr, u32 PhyAddress,
u32 RegisterNum, u16 *PhyDataPtr)
{
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_DEVAD_MASK);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, RegisterNum);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_NOPIDEVAD_MASK);
XAxiEthernet_PhyRead(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, PhyDataPtr);
}
/*
 * Write an extended (MMD / indirect) PHY register via the Clause-22
 * REGCR/ADDAR access sequence. Mirrors XAxiEthernet_PhyReadExtended but
 * ends with a data write instead of a read; the operation order is fixed.
 *
 * @param InstancePtr AXI Ethernet instance used for MDIO access
 * @param PhyAddress  PHY address on the MDIO bus
 * @param RegisterNum extended register number to write
 * @param PhyDataPtr  value to store (passed by value despite the name)
 */
void XAxiEthernet_PhyWriteExtended(XAxiEthernet *InstancePtr, u32 PhyAddress,
u32 RegisterNum, u16 PhyDataPtr)
{
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_DEVAD_MASK);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, RegisterNum);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_CONTROL_REG, IEEE_MMD_ACCESS_CTRL_NOPIDEVAD_MASK);
XAxiEthernet_PhyWrite(InstancePtr, PhyAddress,
IEEE_MMD_ACCESS_ADDRESS_DATA_REG, PhyDataPtr);
}
/*
 * Run IEEE autonegotiation and return the resulting link speed in Mbps.
 *
 * Restarts autonegotiation, waits (busy-wait, no timeout) for completion,
 * then decodes the negotiated speed: via the PCS/PMA-specific registers
 * for 1000BASE-X / SGMII cores, or via the standard partner-ability
 * registers otherwise.
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @param phy_addr  caller-supplied PHY address
 *                  NOTE(review): immediately overwritten by detect_phy(),
 *                  so the parameter value is effectively ignored — confirm
 *                  whether callers rely on passing a specific address.
 * @return link speed in Mbps (10/100/1000, or XAE_SPEED_2500_MBPS), 0 on
 *         1000BASE-X link error
 */
unsigned int get_phy_negotiated_speed (XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 control;
u16 status;
u16 partner_capabilities;
u16 partner_capabilities_1000;
u16 phylinkspeed;
u16 temp;
phy_addr = detect_phy(xaxiemacp);
xil_printf("Start PHY autonegotiation \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
/* Enable and restart autonegotiation. */
control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
if (isphy_pcspma(xaxiemacp, phy_addr)) {
/* Clear the isolate bit so the PCS/PMA core drives the MII. */
control &= IEEE_CTRL_ISOLATE_DISABLE;
}
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
control);
if (isphy_pcspma(xaxiemacp, phy_addr)) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
xil_printf("Waiting for PHY to complete autonegotiation \r\n");
/* Busy-wait until autoneg completes; no timeout is applied. */
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
AxiEthernetUtilPhyDelay(1);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET,
&status);
}
xil_printf("Autonegotiation complete \r\n");
/* 2.5G cores report a fixed speed from the hardware config. */
if (xaxiemacp->Config.Speed == XAE_SPEED_2500_MBPS)
return XAE_SPEED_2500_MBPS;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) == XAE_PHY_TYPE_1000BASE_X) {
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) == XAE_PHY_TYPE_1000BASE_X) {
#endif
/* 1000BASE-X: check the link-partner ability bit on page 1. */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 1);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
if ((temp & 0x0020) == 0x0020) {
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
return 1000;
}
else {
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
xil_printf("Link error, temp = %x\r\n", temp);
return 0;
}
#ifndef SDT
} else if(XAxiEthernet_GetPhysicalInterface(xaxiemacp) == XAE_PHY_TYPE_SGMII) {
#else
} else if(XAxiEthernet_Get_Phy_Interface(xaxiemacp) == XAE_PHY_TYPE_SGMII) {
#endif
/* SGMII: poll for link-up, then decode speed from bits 11:10. */
xil_printf("Waiting for Link to be up; Polling for SGMII core Reg \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
while(!(temp & 0x8000)) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
}
if((temp & 0x0C00) == 0x0800) {
return 1000;
}
else if((temp & 0x0C00) == 0x0400) {
return 100;
}
else if((temp & 0x0C00) == 0x0000) {
return 10;
} else {
xil_printf("get_IEEE_phy_speed(): Invalid speed bit value, Defaulting to Speed = 10 Mbps\r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &temp);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, 0x0100);
return 10;
}
}
}
/* Read PHY control and status registers is successful. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET,
&status);
if ((control & IEEE_CTRL_AUTONEGOTIATE_ENABLE) && (status &
IEEE_STAT_AUTONEGOTIATE_CAPABLE)) {
xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
/* Busy-wait until autoneg completes; no timeout is applied. */
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_STATUS_REG_OFFSET,
&status);
}
xil_printf("autonegotiation complete \r\n");
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET,
&partner_capabilities);
/* Prefer the highest speed the partner advertises: 1000 > 100 > 10. */
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_3_REG_OFFSET,
&partner_capabilities_1000);
if (partner_capabilities_1000 &
IEEE_AN3_ABILITY_MASK_1GBPS)
return 1000;
}
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_100MBPS)
return 100;
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_10MBPS)
return 10;
xil_printf("%s: unknown PHY link speed, setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__);
return 10;
} else {
/* Update TEMAC speed accordingly */
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
/* Get commanded link speed */
phylinkspeed = control &
IEEE_CTRL_1GBPS_LINKSPEED_MASK;
switch (phylinkspeed) {
case (IEEE_CTRL_LINKSPEED_1000M):
return 1000;
case (IEEE_CTRL_LINKSPEED_100M):
return 100;
case (IEEE_CTRL_LINKSPEED_10M):
return 10;
default:
xil_printf("%s: unknown PHY link speed (%d), setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__, phylinkspeed);
return 10;
}
} else {
/* No 1G extensions: speed is forced via the basic control bit. */
return (control & IEEE_CTRL_LINKSPEED_MASK) ? 100 : 10;
}
}
}
/*
 * Configure a TI DP83867 PHY in RGMII mode and run autonegotiation.
 *
 * Programs the RGMII RX/TX clock delays via the extended registers,
 * advertises 10/100/1000 full capabilities, then defers to the generic
 * get_phy_negotiated_speed() for the actual negotiation.
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @param phy_addr  PHY address of the DP83867
 * @return negotiated speed in Mbps
 */
unsigned int get_phy_speed_TI_DP83867(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 phy_val;
u16 control;
xil_printf("Start PHY autonegotiation \r\n");
/* Changing the PHY RX and TX DELAY settings. */
XAxiEthernet_PhyReadExtended(xaxiemacp, phy_addr, DP83867_R32_RGMIICTL1, &phy_val);
phy_val |= DP83867_RGMII_CLOCK_DELAY_CTRL_MASK;
XAxiEthernet_PhyWriteExtended(xaxiemacp, phy_addr, DP83867_R32_RGMIICTL1, phy_val);
XAxiEthernet_PhyReadExtended(xaxiemacp, phy_addr, DP83867_R86_RGMIIDCTL, &phy_val);
/* Keep the upper byte; replace the delay nibbles. */
phy_val &= 0xFF00;
phy_val |= DP83867_RGMII_TX_CLOCK_DELAY_MASK;
phy_val |= DP83867_RGMII_RX_CLOCK_DELAY_MASK;
XAxiEthernet_PhyWriteExtended(xaxiemacp, phy_addr, DP83867_R86_RGMIIDCTL, phy_val);
/* Set advertised speeds for 10/100/1000Mbps modes. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &control);
control |= IEEE_ASYMMETRIC_PAUSE_MASK;
control |= IEEE_PAUSE_MASK;
control |= ADVERTISE_100;
control |= ADVERTISE_10;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, &control);
control |= ADVERTISE_1000;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, control);
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
/*
 * Configure a TI DP83867 PHY in SGMII mode and run autonegotiation.
 *
 * Enables the SGMII clock via indirect REGCR/ADDDR accesses, restarts
 * autonegotiation, programs the CFG2 speed-optimization bits, disables the
 * RGMII block, enables SGMII mode, waits (busy-wait, no timeout) for
 * link-up, applies the TI CFG4 strap-mode workaround, then defers to
 * get_phy_negotiated_speed().
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @param phy_addr  PHY address of the DP83867
 * @return negotiated speed in Mbps
 */
unsigned int get_phy_speed_TI_DP83867_SGMII(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 control;
u16 temp;
u16 phyregtemp;
xil_printf("Start TI PHY autonegotiation \r\n");
/* Enable SGMII Clock */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIITYPE);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN | TI_PHY_REGCR_DEVAD_DATAEN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIICLK_EN);
/* Enable autoneg and request 1000M full duplex. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
control |= (IEEE_CTRL_AUTONEGOTIATE_ENABLE | IEEE_CTRL_LINKSPEED_1000M |
IEEE_CTRL_FULL_DUPLEX);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
control);
/* Program the CFG2 speed-optimization / SGMII autoneg options. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, TI_PHY_CFGR2, &control);
control &= TI_PHY_CFGR2_MASK;
control |= (TI_PHY_CFG2_SPEEDOPT_10EN |
TI_PHY_CFG2_SGMII_AUTONEGEN |
TI_PHY_CFG2_SPEEDOPT_ENH |
TI_PHY_CFG2_SPEEDOPT_CNT |
TI_PHY_CFG2_SPEEDOPT_INTLOW);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_CFGR2, control);
/* Disable RGMII */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
DP83867_R32_RGMIICTL1);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN | TI_PHY_REGCR_DEVAD_DATAEN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, 0);
/* Switch the PHY into SGMII mode. */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_PHYCTRL,
TI_PHY_CR_SGMII_EN);
xil_printf("Waiting for Link to be up \r\n");
/* Busy-wait until link-up; no timeout is applied. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
while(!(temp & 0x4000)) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
}
xil_printf("Auto negotiation completed for TI PHY\n\r");
/* SW workaround for unstable link when RX_CTRL is not STRAP MODE 3 or 4 */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, TI_PHY_REGCFG4);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DATA);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, TI_PHY_ADDDR, (u16_t *)&phyregtemp);
phyregtemp &= ~(TI_PHY_CFG4RESVDBIT7);
phyregtemp |= TI_PHY_CFG4RESVDBIT8;
/* NOTE(review): clearing then immediately setting the same autoneg-timer
 * mask is a net set; likely intended to force a specific timer value. */
phyregtemp &= ~(TI_PHY_CFG4_AUTONEG_TIMER);
phyregtemp |= TI_PHY_CFG4_AUTONEG_TIMER;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, TI_PHY_REGCFG4);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR, TI_PHY_REGCR_DATA);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR, phyregtemp);
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
/*
 * Configure a Marvell 88E1116R PHY and run autonegotiation.
 *
 * Enables RGMII clock delays (page 2), advertises 10/100/1000 plus pause
 * flags, enables downshift, restarts autonegotiation with a soft reset,
 * waits (busy-wait, no timeout) for completion, and decodes the speed from
 * the PHY-specific status register.
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @param phy_addr  PHY address of the 88E1116R
 * @return negotiated speed in Mbps (10/100/1000)
 */
unsigned int get_phy_speed_88E1116R(XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 phy_val;
u16 control;
u16 status;
u16 partner_capabilities;
xil_printf("Start PHY autonegotiation \r\n");
/* Page 2: enable RGMII TX/RX clock delays. */
XAxiEthernet_PhyWrite(xaxiemacp,phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 2);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, &control);
control |= IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, control);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
/* Advertise 10/100 plus pause capabilities. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &control);
control |= IEEE_ASYMMETRIC_PAUSE_MASK;
control |= IEEE_PAUSE_MASK;
control |= ADVERTISE_100;
control |= ADVERTISE_10;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
&control);
control |= ADVERTISE_1000;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
control);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_COPPER_SPECIFIC_CONTROL_REG,
&control);
control |= (7 << 12); /* max number of gigabit atphy_valts */
control |= (1 << 11); /* enable downshift */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_COPPER_SPECIFIC_CONTROL_REG,
control);
/* Restart autonegotiation, then issue a soft reset to apply settings. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
control |= IEEE_CTRL_RESET_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
/* Spin until the self-clearing reset bit drops. */
while (1) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
if (control & IEEE_CTRL_RESET_MASK)
continue;
else
break;
}
xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
/* Busy-wait for autoneg completion; no timeout is applied. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
AxiEthernetUtilPhyDelay(1);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_COPPER_SPECIFIC_STATUS_REG_2,
&phy_val);
if (phy_val & IEEE_AUTONEG_ERROR_MASK) {
xil_printf("Auto negotiation error \r\n");
}
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_STATUS_REG_OFFSET,
&status);
}
xil_printf("autonegotiation complete \r\n");
/* Speed is encoded in bits 15:14 of the PHY-specific status register. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_SPECIFIC_STATUS_REG,
&partner_capabilities);
if ( ((partner_capabilities >> 14) & 3) == 2)/* 1000Mbps */
return 1000;
else if ( ((partner_capabilities >> 14) & 3) == 1)/* 100Mbps */
return 100;
else /* 10Mbps */
return 10;
}
/*
 * Configure a Marvell 88E1111 PHY and run autonegotiation.
 *
 * For RGMII 2.0 interfaces, enables the RX clock delay and performs a soft
 * reset with a bounded timeout; then advertises 10/100/1000 and defers to
 * get_phy_negotiated_speed().
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @param phy_addr  PHY address of the 88E1111
 * @return negotiated speed in Mbps, or 0 when the PHY reset timed out
 */
unsigned int get_phy_speed_88E1111 (XAxiEthernet *xaxiemacp, u32 phy_addr)
{
u16 control;
int TimeOut;
u16 phy_val;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_2_0) {
/* Enable delayed RX clock for RGMII 2.0 timing. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_EXT_PHY_SPECIFIC_CONTROL_REG, &phy_val);
phy_val |= PHY_88E1111_RGMII_RX_CLOCK_DELAYED_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_EXT_PHY_SPECIFIC_CONTROL_REG, phy_val);
/* Soft-reset the PHY so the new delay setting takes effect. */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
&control);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_RESET_MASK);
/* Poll the self-clearing reset bit with a bounded timeout. */
TimeOut = RESET_TIMEOUT;
while (TimeOut) {
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_CONTROL_REG_OFFSET, &control);
if (!(control & IEEE_CTRL_RESET_MASK))
break;
TimeOut -= 1;
}
if (!TimeOut) {
xil_printf("%s: Phy Reset failed\n\r", __FUNCTION__);
return 0;
}
}
/* Advertise 1000 plus 100/10, then negotiate. */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
ADVERTISE_1000);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100_AND_10);
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
/*
 * Determine the link speed (in Mbps) by dispatching to the routine for the
 * detected PHY model.
 *
 * Known vendor PHYs (Marvell 88E1116R / 88E1111, TI DP83867 in RGMII or
 * SGMII mode) get their dedicated setup path; a Xilinx PCS/PMA core falls
 * back to the generic IEEE negotiation.
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @return negotiated speed in Mbps, or 0 when no supported PHY was found
 */
unsigned get_IEEE_phy_speed(XAxiEthernet *xaxiemacp)
{
u16 phy_identifier;
u16 phy_model;
u8 phytype;
/* BUGFIX: phy_addr was declared inside the #ifdef below but referenced
 * after the #endif, breaking the build when XPAR_AXIETHERNET_0_BASEADDR
 * is undefined. Declare it unconditionally; 0 matches detect_phy()'s
 * "assume address 0" fallback. */
u32 phy_addr = 0;
#ifdef XPAR_AXIETHERNET_0_BASEADDR
phy_addr = detect_phy(xaxiemacp);
/* Get the PHY Identifier and Model number */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG, &phy_identifier);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG, &phy_model);
/* Depending upon what manufacturer PHY is connected, a different mask is
 * needed to determine the specific model number of the PHY. */
if (phy_identifier == MARVEL_PHY_IDENTIFIER) {
phy_model = phy_model & MARVEL_PHY_MODEL_NUM_MASK;
if (phy_model == MARVEL_PHY_88E1116R_MODEL) {
return get_phy_speed_88E1116R(xaxiemacp, phy_addr);
} else if (phy_model == MARVEL_PHY_88E1111_MODEL) {
return get_phy_speed_88E1111(xaxiemacp, phy_addr);
}
} else if (phy_identifier == TI_PHY_IDENTIFIER) {
phy_model = phy_model & TI_PHY_DP83867_MODEL;
#ifndef SDT
phytype = XAxiEthernet_GetPhysicalInterface(xaxiemacp);
#else
phytype = XAxiEthernet_Get_Phy_Interface(xaxiemacp);
#endif
if (phy_model == TI_PHY_DP83867_MODEL && phytype == XAE_PHY_TYPE_SGMII) {
return get_phy_speed_TI_DP83867_SGMII(xaxiemacp, phy_addr);
}
if (phy_model == TI_PHY_DP83867_MODEL) {
return get_phy_speed_TI_DP83867(xaxiemacp, phy_addr);
}
}
else {
LWIP_DEBUGF(NETIF_DEBUG, ("XAxiEthernet get_IEEE_phy_speed: Detected PHY with unknown identifier/model.\r\n"));
}
#else
/* Silence unused-variable warnings when MDIO probing is compiled out. */
(void)phy_identifier;
(void)phy_model;
(void)phytype;
#endif
if (isphy_pcspma(xaxiemacp, phy_addr)) {
return get_phy_negotiated_speed(xaxiemacp, phy_addr);
}
/* BUGFIX: previously fell off the end without a return value (undefined
 * behavior); report 0 = speed unknown instead. */
return 0;
}
/*
 * Force the PHY to a fixed link speed (no autonegotiation of speed).
 *
 * Sets RGMII clock delays when applicable, clears all speed bits in the
 * control register, sets the bit for the requested speed, suppresses
 * advertisement of the other speeds (for 100/10), applies the change with
 * a soft reset, and for SGMII re-enables autonegotiation with isolate
 * cleared. Ends with a crude busy-wait settle delay.
 *
 * @param xaxiemacp AXI Ethernet instance used for MDIO access
 * @param speed     requested speed: 1000, 100, or 10 (Mbps)
 * @return always 0
 */
unsigned configure_IEEE_phy_speed(XAxiEthernet *xaxiemacp, unsigned speed)
{
u16 control;
u32 phy_addr = detect_phy(xaxiemacp);
u16 phy_val;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_2_0) {
/* Setting Tx and Rx Delays for RGMII mode */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0x2);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, &phy_val);
phy_val |= IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK;
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_CONTROL_REG_MAC, phy_val);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0x0);
}
XAxiEthernet_PhyRead(xaxiemacp, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
/* Clear all speed-select bits before setting the requested one. */
control &= ~IEEE_CTRL_LINKSPEED_1000M;
control &= ~IEEE_CTRL_LINKSPEED_100M;
control &= ~IEEE_CTRL_LINKSPEED_10M;
if (speed == 1000) {
control |= IEEE_CTRL_LINKSPEED_1000M;
}
else if (speed == 100) {
control |= IEEE_CTRL_LINKSPEED_100M;
/* Don't advertise PHY speed of 1000 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 10 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100);
}
else if (speed == 10) {
control |= IEEE_CTRL_LINKSPEED_10M;
/* Don't advertise PHY speed of 1000 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 100 Mbps */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_10);
}
/* Apply the new speed with a soft reset. */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr,
IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_RESET_MASK);
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_SGMII) {
control &= (~PHY_R0_ISOLATE);
/* NOTE(review): this write uses the fixed XPAR_AXIETHERNET_0_PHYADDR
 * macro rather than the detected phy_addr — confirm this is intended
 * for the SGMII configuration. */
XAxiEthernet_PhyWrite(xaxiemacp,
XPAR_AXIETHERNET_0_PHYADDR,
IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_AUTONEGOTIATE_ENABLE);
}
/* Crude settle delay; loop counters are volatile so they are not
 * optimized away. */
{
volatile int wait;
for (wait=0; wait < 100000; wait++);
for (wait=0; wait < 100000; wait++);
}
return 0;
}
/*
 * Perform per-interface-type PHY initialization and select the link speed.
 *
 * xaxiemacp: AXI Ethernet instance to set up.
 *
 * Dispatches on the configured PHY interface type (RGMII 1.3 / RGMII 2.0 /
 * SGMII / 1000Base-X); only the SGMII branch currently does real work
 * (enables auto-negotiation at 1 Gbps with isolation cleared). The link
 * speed is then either auto-detected or forced, depending on which
 * CONFIG_LINKSPEED* macro is defined at build time.
 *
 * Returns the link speed in Mbps (defaults to 1000 if no CONFIG_LINKSPEED*
 * macro is defined).
 */
unsigned phy_setup_axiemac (XAxiEthernet *xaxiemacp)
{
unsigned link_speed = 1000;
#ifndef SDT
if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_1_3) {
; /* Add PHY initialization code for RGMII 1.3 */
#ifndef SDT
} else if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
} else if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_RGMII_2_0) {
; /* Add PHY initialization code for RGMII 2.0 */
#ifndef SDT
} else if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
} else if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_SGMII) {
#ifdef CONFIG_LINKSPEED_AUTODETECT
/* SGMII: kick off auto-negotiation at 1 Gbps with the PHY de-isolated. */
u32 phy_wr_data = IEEE_CTRL_AUTONEGOTIATE_ENABLE |
IEEE_CTRL_LINKSPEED_1000M;
phy_wr_data &= (~PHY_R0_ISOLATE);
XAxiEthernet_PhyWrite(xaxiemacp,
XPAR_AXIETHERNET_0_PHYADDR,
IEEE_CONTROL_REG_OFFSET,
phy_wr_data);
#endif
#ifndef SDT
} else if (XAxiEthernet_GetPhysicalInterface(xaxiemacp) ==
#else
} else if (XAxiEthernet_Get_Phy_Interface(xaxiemacp) ==
#endif
XAE_PHY_TYPE_1000BASE_X) {
; /* Add PHY initialization code for 1000 Base-X */
}
/* set PHY <--> MAC data clock */
#ifdef CONFIG_LINKSPEED_AUTODETECT
link_speed = get_IEEE_phy_speed(xaxiemacp);
xil_printf("auto-negotiated link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED1000)
link_speed = 1000;
configure_IEEE_phy_speed(xaxiemacp, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED100)
link_speed = 100;
configure_IEEE_phy_speed(xaxiemacp, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED10)
link_speed = 10;
configure_IEEE_phy_speed(xaxiemacp, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#endif
return link_speed;
}
/*
 * Delay for approximately the given number of seconds.
 *
 * On MicroBlaze there is no guaranteed timer, so this spins in a calibrated
 * assembly loop (ITERS_PER_SEC iterations per second, derived from the CPU
 * clock); on all other CPUs it simply calls sleep(). Marked noinline so the
 * calibrated loop timing is not disturbed by the optimizer.
 */
static void __attribute__ ((noinline)) AxiEthernetUtilPhyDelay(unsigned int Seconds)
{
#if defined (__MICROBLAZE__)
static int WarningFlag = 0;
/* If MB caches are disabled or do not exist, this delay loop could
 * take minutes instead of seconds (e.g., 30x longer). Print a warning
 * message for the user (once). If only MB had a built-in timer!
 */
/* MSR bit 0x20 indicates the instruction cache state here; warn only once. */
if (((mfmsr() & 0x20) == 0) && (!WarningFlag)) {
WarningFlag = 1;
}
#define ITERS_PER_SEC (XPAR_CPU_CORE_CLOCK_FREQ_HZ / 6)
/* Inner loop burns ITERS_PER_SEC iterations; outer loop repeats per second. */
__asm volatile ("\n"
"1: \n\t"
"addik r7, r0, %0 \n\t"
"2: \n\t"
"addik r7, r7, -1 \n\t"
"bneid r7, 2b \n\t"
"or r0, r0, r0 \n\t"
"bneid %1, 1b \n\t"
"addik %1, %1, -1 \n\t"
:: "i"(ITERS_PER_SEC), "d" (Seconds));
#else
sleep(Seconds);
#endif
}
/*
 * Enable the SGMII clock output on a TI DP83867 PHY.
 *
 * Detects the PHY, reads its identifier/model registers, and — only when a
 * DP83867 is found AND the MAC is configured for SGMII — switches the PHY to
 * 6-wire mode so it drives the SGMII clock. The DP83867 SGMIICTL register is
 * an extended register, hence the REGCR/ADDDR indirect-access sequence.
 * No effect for any other PHY or interface type.
 */
void enable_sgmii_clock(XAxiEthernet *xaxiemacp)
{
u16 phy_identifier;
u16 phy_model;
u8 phytype;
XAxiEthernet_PhySetMdioDivisor(xaxiemacp, XAE_MDIO_DIV_DFT);
u32 phy_addr = detect_phy(xaxiemacp);
/* Get the PHY Identifier and Model number */
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_1_REG, &phy_identifier);
XAxiEthernet_PhyRead(xaxiemacp, phy_addr, PHY_IDENTIFIER_2_REG, &phy_model);
if (phy_identifier == TI_PHY_IDENTIFIER) {
/* Mask down to the model bits before comparing. */
phy_model = phy_model & TI_PHY_DP83867_MODEL;
#ifndef SDT
phytype = XAxiEthernet_GetPhysicalInterface(xaxiemacp);
#else
phytype = XAxiEthernet_Get_Phy_Interface(xaxiemacp);
#endif
if (phy_model == TI_PHY_DP83867_MODEL && phytype == XAE_PHY_TYPE_SGMII) {
/* Enable SGMII Clock by switching to 6-wire mode */
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIITYPE);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_REGCR,
TI_PHY_REGCR_DEVAD_EN | TI_PHY_REGCR_DEVAD_DATAEN);
XAxiEthernet_PhyWrite(xaxiemacp, phy_addr, TI_PHY_ADDDR,
TI_PHY_SGMIICLK_EN);
}
}
}

View File

@ -1,873 +0,0 @@
/*
* Copyright (C) 2007 - 2022 Xilinx, Inc.
* Copyright (C) 2022 - 2024 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwipopts.h"
#include "xlwipconfig.h"
#if !NO_SYS
#include "FreeRTOS.h"
#include "semphr.h"
#include "timers.h"
#include "lwip/timeouts.h"
#endif
#include <stdio.h>
#include <string.h>
#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "netif/etharp.h"
#include "netif/xadapter.h"
#include "netif/xemacliteif.h"
#include "xstatus.h"
#include "netif/xpqueue.h"
#include "xlwipconfig.h"
#include "xparameters.h"
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#include "xscugic.h"
#define INTC_DIST_BASE_ADDR XPAR_SCUGIC_DIST_BASEADDR
#else
#include "xintc.h"
#endif
#else
#include "xinterrupt_wrap.h"
#endif
/* Define those to better describe your network interface. */
#define IFNAME0 'x'
#define IFNAME1 'e'
/* Advertisement control register. */
#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */
#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */
#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */
#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */
#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
#define ADVERTISE_100_AND_10 (ADVERTISE_10FULL | ADVERTISE_100FULL | \
ADVERTISE_10HALF | ADVERTISE_100HALF)
#define ADVERTISE_100 (ADVERTISE_100FULL | ADVERTISE_100HALF)
#define ADVERTISE_10 (ADVERTISE_10FULL | ADVERTISE_10HALF)
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#define EMACLITE_INTR_PRIORITY_SET_IN_GIC 0xA0
#define TRIG_TYPE_RISING_EDGE_SENSITIVE 0x3
#endif
#define IEEE_CONTROL_REG_OFFSET 0
#define IEEE_STATUS_REG_OFFSET 1
#define IEEE_AUTONEGO_ADVERTISE_REG 4
#define IEEE_PARTNER_ABILITIES_1_REG_OFFSET 5
#define IEEE_PARTNER_ABILITIES_2_REG_OFFSET 8
#define IEEE_PARTNER_ABILITIES_3_REG_OFFSET 10
#define IEEE_1000_ADVERTISE_REG_OFFSET 9
#define IEEE_CTRL_1GBPS_LINKSPEED_MASK 0x2040
#define IEEE_CTRL_LINKSPEED_MASK 0x0040
#define IEEE_CTRL_LINKSPEED_1000M 0x0040
#define IEEE_CTRL_LINKSPEED_100M 0x2000
#define IEEE_CTRL_LINKSPEED_10M 0x0000
#define IEEE_CTRL_RESET_MASK 0x8000
#define IEEE_CTRL_AUTONEGOTIATE_ENABLE 0x1000
#define IEEE_STAT_AUTONEGOTIATE_CAPABLE 0x0008
#define IEEE_STAT_AUTONEGOTIATE_COMPLETE 0x0020
#define IEEE_STAT_AUTONEGOTIATE_RESTART 0x0200
#define IEEE_STAT_1GBPS_EXTENSIONS 0x0100
#define IEEE_AN1_ABILITY_MASK 0x1FE0
#define IEEE_AN3_ABILITY_MASK_1GBPS 0x0C00
#define IEEE_AN1_ABILITY_MASK_100MBPS 0x0380
#define IEEE_AN1_ABILITY_MASK_10MBPS 0x0060
#define PHY_DETECT_REG 1
#define PHY_DETECT_MASK 0x1808
/* Forward declarations. */
static err_t xemacliteif_output(struct netif *netif, struct pbuf *p,
const ip_addr_t *ipaddr);
unsigned get_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep);
unsigned configure_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep, unsigned speed);
/* The payload from multiple pbufs is assembled into a single contiguous
* area for transmission. Currently this is a global variable (it should really
* belong in the per netif structure), but that is ok since this can be used
* only in a protected context
*/
unsigned char xemac_tx_frame[XEL_MAX_FRAME_SIZE] __attribute__((aligned(64)));
#if !NO_SYS
extern u32 xInsideISR;
#endif
#ifndef XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ
#if XPAR_INTC_0_HAS_FAST == 1
/*********** Function Prototypes *********************************************/
/*
* Function prototypes of the functions used for registering Fast
* Interrupt Handlers
*/
static void XEmacLite_FastInterruptHandler(void)
__attribute__ ((fast_interrupt));
/**************** Variable Declarations **************************************/
/** Variables for Fast Interrupt handlers ***/
XEmacLite *xemaclitep_fast;
#endif
#endif
/*
 * EmacLite receive interrupt handler.
 *
 * arg is the struct xemac_s registered via XEmacLite_SetRecvHandler. Pulls
 * one frame out of the hardware into a freshly allocated pbuf and enqueues
 * it on recv_q for the xemacif input thread. On any failure (no pbuf, empty
 * frame, full queue) the frame is dropped — but it must still be read from
 * the hardware, otherwise EmacLite stops generating receive interrupts.
 * Runs in interrupt context; xInsideISR brackets the lwIP ISR-safe section
 * in RTOS builds.
 */
static void
xemacif_recv_handler(void *arg) {
struct xemac_s *xemac = (struct xemac_s *)(arg);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
XEmacLite *instance = xemacliteif->instance;
struct pbuf *p;
int len = 0;
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#if !NO_SYS
xInsideISR++;
#endif
/* On AXI INTC systems the interrupt must be acknowledged manually. */
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#else
#ifndef SDT
XIntc_AckIntr(xtopologyp->intc_baseaddr, 1 << xtopologyp->intc_emac_intr);
#endif
#endif
p = pbuf_alloc(PBUF_RAW, XEL_MAX_FRAME_SIZE, PBUF_POOL);
if (!p) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
/* receive and just ignore the frame.
 * we need to receive the frame because otherwise emaclite will
 * not generate any other interrupts since it cannot receive,
 * and we do not actively poll the emaclite
 */
XEmacLite_Recv(instance, xemac_tx_frame);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
/* receive the packet */
len = XEmacLite_Recv(instance, p->payload);
if (len == 0) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
pbuf_free(p);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
/* store it in the receive queue, where it'll be processed by xemacif input thread */
if (pq_enqueue(xemacliteif->recv_q, (void*)p) < 0) {
#if LINK_STATS
lwip_stats.link.memerr++;
lwip_stats.link.drop++;
#endif
pbuf_free(p);
#if !NO_SYS
xInsideISR--;
#endif
return;
}
/* Wake the input thread that drains recv_q. */
#if !NO_SYS
sys_sem_signal(&xemac->sem_rx_data_available);
xInsideISR--;
#endif
}
/*
 * Hand one assembled Ethernet frame to the EmacLite transmitter.
 *
 * instancep: EmacLite driver instance.
 * packet:    contiguous frame buffer.
 * len:       frame length in bytes.
 *
 * Returns 0 on success, -1 if the driver rejected the send.
 */
int transmit_packet(XEmacLite *instancep, void *packet, unsigned len)
{
/* there is space for a buffer, so transfer */
if (XEmacLite_Send(instancep, packet, len) != XST_SUCCESS) {
return -1;
}
return 0;
}
/*
* this function is always called with interrupts off
* this function also assumes that there is space to send in the Emaclite buffer
*/
/*
 * Copy a (possibly chained) pbuf into the single contiguous xemac_tx_frame
 * buffer and transmit it. Caller must guarantee interrupts are off and that
 * the EmacLite transmit buffer has space (see low_level_output).
 * Always returns ERR_OK; a failed hardware send is only counted in the
 * link statistics, not reported to the caller.
 */
static err_t
_unbuffered_low_level_output(XEmacLite *instancep, struct pbuf *p)
{
struct pbuf *q;
int total_len = 0;
#if ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
/* Flatten the pbuf chain into xemac_tx_frame. */
for(q = p, total_len = 0; q != NULL; q = q->next) {
/* Send the data from the pbuf to the interface, one pbuf at a
time. The size of the data in each pbuf is kept in the ->len
variable. */
memcpy(xemac_tx_frame + total_len, q->payload, q->len);
total_len += q->len;
}
if (transmit_packet(instancep, xemac_tx_frame, total_len) < 0) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
}
#if ETH_PAD_SIZE
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
#if LINK_STATS
lwip_stats.link.xmit++;
#endif /* LINK_STATS */
return ERR_OK;
}
/*
* low_level_output():
*
* Should do the actual transmission of the packet. The packet is
* contained in the pbuf that is passed to the function. This pbuf
* might be chained.
*
*/
/*
 * lwIP linkoutput: transmit pbuf p, or queue it if the hardware is busy.
 *
 * If the EmacLite TX buffer is free, any backlogged frame is sent first;
 * only when the backlog is empty is p sent directly. Otherwise p is deep-
 * copied into a fresh pbuf (lwIP may modify the original after we return)
 * and enqueued on send_q to be drained by xemacif_send_handler.
 * The whole function runs under SYS_ARCH_PROTECT because it races with the
 * TX-done interrupt handler over send_q and the hardware buffer.
 * Returns ERR_OK, or ERR_MEM if the copy or enqueue fails.
 */
static err_t
low_level_output(struct netif *netif, struct pbuf *p)
{
SYS_ARCH_DECL_PROTECT(lev);
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
XEmacLite *instance = xemacliteif->instance;
struct pbuf *q;
SYS_ARCH_PROTECT(lev);
/* check if space is available to send */
if (XEmacLite_TxBufferAvailable(instance) == TRUE) {
if (pq_qlength(xemacliteif->send_q)) { /* send backlog */
/* NOTE(review): the dequeued backlog pbuf is not pbuf_free'd here
 * (the ISR path does free it) — confirm this is not a leak. */
_unbuffered_low_level_output(instance, (struct pbuf *)pq_dequeue(xemacliteif->send_q));
} else { /* send current */
_unbuffered_low_level_output(instance, p);
SYS_ARCH_UNPROTECT(lev);
return ERR_OK;
}
}
/* if we cannot send the packet immediately, then make a copy of the whole packet
 * into a separate pbuf and store it in send_q. We cannot enqueue the pbuf as is
 * since parts of the pbuf may be modified inside lwIP.
 */
q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_POOL);
if (!q) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
SYS_ARCH_UNPROTECT(lev);
return ERR_MEM;
}
/* Flatten the chain into the single new pbuf. */
for (q->len = 0; p; p = p->next) {
memcpy(q->payload + q->len, p->payload, p->len);
q->len += p->len;
}
if (pq_enqueue(xemacliteif->send_q, (void *)q) < 0) {
#if LINK_STATS
lwip_stats.link.drop++;
#endif
SYS_ARCH_UNPROTECT(lev);
return ERR_MEM;
}
SYS_ARCH_UNPROTECT(lev);
return ERR_OK;
}
/*
 * EmacLite transmit-done interrupt handler.
 *
 * arg is the struct xemac_s registered via XEmacLite_SetSendHandler.
 * Acknowledges the interrupt (AXI INTC systems only) and, if a frame is
 * backlogged on send_q while the hardware TX buffer is free, sends and
 * frees the oldest backlogged frame. Runs in interrupt context.
 */
static void
xemacif_send_handler(void *arg) {
struct xemac_s *xemac = (struct xemac_s *)(arg);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
XEmacLite *instance = xemacliteif->instance;
struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
#if !NO_SYS
xInsideISR++;
#endif
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
#else
#ifndef SDT
XIntc_AckIntr(xtopologyp->intc_baseaddr, 1 << xtopologyp->intc_emac_intr);
#endif
#endif
if (pq_qlength(xemacliteif->send_q) && (XEmacLite_TxBufferAvailable(instance) == TRUE)) {
struct pbuf *p = pq_dequeue(xemacliteif->send_q);
_unbuffered_low_level_output(instance, p);
pbuf_free(p);
}
#if !NO_SYS
xInsideISR--;
#endif
}
/*
* low_level_input():
*
* Should allocate a pbuf and transfer the bytes of the incoming
* packet from the interface into the pbuf.
*
*/
static struct pbuf *
low_level_input(struct netif *netif)
{
struct xemac_s *xemac = (struct xemac_s *)(netif->state);
xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
/* see if there is data to process */
if (pq_qlength(xemacliteif->recv_q) == 0)
return NULL;
/* return one packet from receive q */
return (struct pbuf *)pq_dequeue(xemacliteif->recv_q);
}
/*
 * xemacliteif_output():
 *
 * This function is called by the TCP/IP stack when an IP packet
 * should be sent. It resolves the destination hardware address via ARP
 * (etharp_output), which in turn calls low_level_output() — registered as
 * netif->linkoutput — to do the actual transmission, or queues the packet
 * until the ARP reply arrives.
 *
 */
err_t
xemacliteif_output(struct netif *netif, struct pbuf *p,
const ip_addr_t *ipaddr)
{
/* resolve hardware address, then send (or queue) packet */
return etharp_output(netif, p, ipaddr);
}
/*
 * xemacliteif_input():
 *
 * This function should be called when a packet is ready to be read
 * from the interface. It uses the function low_level_input() that
 * should handle the actual reception of bytes from the network
 * interface.
 *
 * Note the unusual shape: in RTOS builds (!NO_SYS) the braced body below
 * becomes a while(1) loop that drains the whole receive queue and only
 * returns via the "return 0" when the queue is empty; in raw-API builds
 * the same braces are a plain block processing at most one packet.
 *
 * Returns the number of packets read (max 1 packet on success,
 * 0 if there are no packets)
 *
 */
int
xemacliteif_input(struct netif *netif)
{
struct eth_hdr *ethhdr;
struct pbuf *p;
SYS_ARCH_DECL_PROTECT(lev);
#if !NO_SYS
while (1)
#endif
{
/* Dequeue under protection: the receive ISR also touches recv_q. */
SYS_ARCH_PROTECT(lev);
/* move received packet into a new pbuf */
p = low_level_input(netif);
SYS_ARCH_UNPROTECT(lev);
/* no packet could be read, silently ignore this */
if (p == NULL)
return 0;
/* points to packet payload, which starts with an Ethernet header */
ethhdr = p->payload;
#if LINK_STATS
lwip_stats.link.recv++;
#endif /* LINK_STATS */
/* Dispatch by EtherType; anything unrecognized is dropped. */
switch (htons(ethhdr->type)) {
/* IP or ARP packet? */
case ETHTYPE_IP:
case ETHTYPE_ARP:
#if PPPOE_SUPPORT
/* PPPoE packet? */
case ETHTYPE_PPPOEDISC:
case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
/* full packet send to tcpip_thread to process */
if (netif->input(p, netif) != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("xlltemacif_input: IP input error\r\n"));
pbuf_free(p);
p = NULL;
}
break;
default:
pbuf_free(p);
p = NULL;
break;
}
}
return 1;
}
#if !NO_SYS
/* Periodic ARP-cache maintenance: run etharp_tmr() then re-arm itself,
 * producing an ARP tick every ARP_TMR_INTERVAL ms (RTOS builds only;
 * initially armed in xemacliteif_init). */
static void
arp_timer(void *arg)
{
etharp_tmr();
sys_timeout(ARP_TMR_INTERVAL, arp_timer, NULL);
}
#endif
/*
 * Look up the EmacLite driver configuration entry whose base address
 * matches `base`. Returns a pointer into XEmacLite_ConfigTable, or NULL
 * when no instance is registered at that address.
 */
static XEmacLite_Config *
xemaclite_lookup_config(unsigned base)
{
int idx;
/* Linear scan of the generated driver configuration table. */
for (idx = 0; idx < XPAR_XEMACLITE_NUM_INSTANCES; idx++) {
if (XEmacLite_ConfigTable[idx].BaseAddress == base) {
return &XEmacLite_ConfigTable[idx];
}
}
return NULL;
}
/*
 * One-time hardware setup for an EmacLite-backed netif.
 *
 * On entry netif->state holds the EmacLite base address; on success it is
 * replaced with the allocated struct xemac_s, whose state in turn points at
 * the xemacliteif_s (driver instance + RX/TX queues). Allocates the driver
 * objects, hooks the receive/send interrupt handlers into whichever
 * interrupt controller the build targets (SCUGIC on Zynq, AXI INTC
 * otherwise, XSetupInterruptSystem in SDT flows), programs the MAC address,
 * and configures the PHY link speed per the CONFIG_LINKSPEED* macros.
 *
 * Returns ERR_OK, or ERR_MEM on allocation failure.
 * NOTE(review): on a failed later allocation the earlier mem_malloc'd
 * objects are not freed — confirm whether init failure is treated as fatal.
 */
static err_t low_level_init(struct netif *netif)
{
struct xemac_s *xemac;
XEmacLite_Config *config;
XEmacLite *xemaclitep;
struct xtopology_t *xtopologyp;
xemacliteif_s *xemacliteif;
unsigned link_speed = 1000;
xemaclitep = mem_malloc(sizeof *xemaclitep);
#ifndef XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ
#if XPAR_INTC_0_HAS_FAST == 1
/* Fast interrupt handlers take no argument, so stash the instance globally. */
xemaclitep_fast = xemaclitep;
#endif
#endif
if (xemaclitep == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacliteif_init: out of memory\r\n"));
return ERR_MEM;
}
xemac = mem_malloc(sizeof *xemac);
if (xemac == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacliteif_init: out of memory\r\n"));
return ERR_MEM;
}
xemacliteif = mem_malloc(sizeof *xemacliteif);
if (xemacliteif == NULL) {
LWIP_DEBUGF(NETIF_DEBUG, ("xemacliteif_init: out of memory\r\n"));
return ERR_MEM;
}
/* obtain pointer to topology structure for this emac */
xemac->topology_index = xtopology_find_index((unsigned)(netif->state));
xtopologyp = &xtopology[xemac->topology_index];
/* obtain config of this emaclite */
config = xemaclite_lookup_config((unsigned)(netif->state));
/* maximum transfer unit */
netif->mtu = XEL_MTU_SIZE;
/* broadcast capability */
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP;
/* initialize the mac */
#ifndef SDT
XEmacLite_Initialize(xemaclitep, config->DeviceId);
#else
XEmacLite_Initialize(xemaclitep, config->BaseAddress);
#endif
xemaclitep->NextRxBufferToUse = 0;
/* Wire the EmacLite interrupt into the platform interrupt controller. */
#ifndef SDT
#if XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ == 1
XScuGic_RegisterHandler(xtopologyp->scugic_baseaddr,
xtopologyp->intc_emac_intr,
(Xil_ExceptionHandler)XEmacLite_InterruptHandler,
xemaclitep);
XScuGic_SetPriTrigTypeByDistAddr(INTC_DIST_BASE_ADDR,
xtopologyp->intc_emac_intr,
EMACLITE_INTR_PRIORITY_SET_IN_GIC,
TRIG_TYPE_RISING_EDGE_SENSITIVE);
XScuGic_EnableIntr(INTC_DIST_BASE_ADDR,
xtopologyp->intc_emac_intr);
#else
#if NO_SYS
#if XPAR_INTC_0_HAS_FAST == 1
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XFastInterruptHandler)XEmacLite_FastInterruptHandler);
#else
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XInterruptHandler)XEmacLite_InterruptHandler,
xemaclitep);
#endif
#else
#if XPAR_INTC_0_HAS_FAST == 1
XIntc_RegisterFastHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XFastInterruptHandler)XEmacLite_FastInterruptHandler);
XIntc_EnableIntr(xtopologyp->intc_baseaddr, XIntc_In32(xtopologyp->intc_baseaddr +
XIN_IER_OFFSET) | (1 << xtopologyp->intc_emac_intr));
#else
XIntc_RegisterHandler(xtopologyp->intc_baseaddr,
xtopologyp->intc_emac_intr,
(XInterruptHandler)XEmacLite_InterruptHandler,
xemaclitep);
XIntc_EnableIntr(xtopologyp->intc_baseaddr, XIntc_In32(xtopologyp->intc_baseaddr +
XIN_IER_OFFSET) | (1 << xtopologyp->intc_emac_intr));
#endif
#endif
#endif
#else
XSetupInterruptSystem(xemaclitep, &XEmacLite_InterruptHandler,
config->IntrId,
config->IntrParent,
XINTERRUPT_DEFAULT_PRIORITY);
#endif
/* set mac address */
XEmacLite_SetMacAddress(xemaclitep, (unsigned char*)(netif->hwaddr));
/* flush any frames already received */
XEmacLite_FlushReceive(xemaclitep);
/* set Rx, Tx interrupt handlers */
XEmacLite_SetRecvHandler(xemaclitep, (void *)(xemac), xemacif_recv_handler);
XEmacLite_SetSendHandler(xemaclitep, (void *)(xemac), xemacif_send_handler);
/* enable Rx, Tx interrupts */
XEmacLite_EnableInterrupts(xemaclitep);
#if !NO_SYS
sys_sem_new(&xemac->sem_rx_data_available, 0);
#endif
/* replace the state in netif (currently the base address of emaclite)
 * with the xemacliteif instance pointer.
 * this contains a pointer to the config table entry
 */
xemac->type = xemac_type_xps_emaclite;
xemac->state = (void *)xemacliteif;
netif->state = (void *)xemac;
xemacliteif->instance = xemaclitep;
xemacliteif->recv_q = pq_create_queue();
if (!xemacliteif->recv_q)
return ERR_MEM;
xemacliteif->send_q = pq_create_queue();
if (!xemacliteif->send_q)
return ERR_MEM;
/* Initialize PHY */
/* set PHY <--> MAC data clock */
#ifdef CONFIG_LINKSPEED_AUTODETECT
link_speed = get_IEEE_phy_speed_emaclite(xemaclitep);
xil_printf("auto-negotiated link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED1000)
xil_printf("Link speed of 1000 Mbps not possible\r\n");
#elif defined(CONFIG_LINKSPEED100)
link_speed = 100;
configure_IEEE_phy_speed_emaclite(xemaclitep, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#elif defined(CONFIG_LINKSPEED10)
link_speed = 10;
configure_IEEE_phy_speed_emaclite(xemaclitep, link_speed);
xil_printf("link speed: %d\r\n", link_speed);
#endif
return ERR_OK;
}
/*
 * Scan the MDIO bus for a PHY, probing addresses 31 down to 1.
 *
 * A PHY is considered present when its status register reads neither
 * 0xFFFF (no device) and has the expected capability bits set
 * (PHY_DETECT_MASK). Address 0 is never probed; it is used as the
 * fallback when nothing responds.
 *
 * Returns the detected PHY address, or 0 if none was found.
 */
static int detect_phy_emaclite(XEmacLite *xemaclitep)
{
u16 phy_reg;
u32 phy_addr;
for (phy_addr = 31; phy_addr > 0; phy_addr--) {
XEmacLite_PhyRead(xemaclitep, phy_addr, PHY_DETECT_REG, &phy_reg);
if ((phy_reg != 0xFFFF) &&
((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
/* Found a valid PHY address */
LWIP_DEBUGF(NETIF_DEBUG, ("XEMacLite detect_phy: PHY detected at address %d.\r\n", phy_addr));
LWIP_DEBUGF(NETIF_DEBUG, ("XEMacLite detect_phy: PHY detected.\r\n"));
return phy_addr;
}
}
LWIP_DEBUGF(NETIF_DEBUG, ("XEMacLite detect_phy: No PHY detected. Assuming a PHY at address 0\r\n"));
/* default to zero */
return 0;
}
/*
 * Determine the link speed of the PHY attached to an EmacLite MAC.
 *
 * Advertises only 10/100 Mbps (EmacLite cannot do gigabit), restarts
 * auto-negotiation, and — if the PHY supports and has auto-negotiation
 * enabled — busy-waits for completion and decodes the link partner's
 * abilities. Otherwise falls back to decoding the commanded speed from the
 * PHY control register. Returns the speed in Mbps (1000/100/10; 10 is the
 * conservative fallback for unknown results).
 *
 * NOTE(review): the completion wait has no timeout — with no link partner
 * this spins forever; confirm that is acceptable at the call sites.
 */
unsigned get_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep)
{
u16 control;
u16 status;
u16 partner_capabilities;
u16 partner_capabilities_1000;
u16 phylinkspeed;
u32 phy_addr = detect_phy_emaclite(xemaclitep);
/* Don't advertise PHY speed of 1000 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Advertise PHY speed of 100 and 10 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100_AND_10);
/* Enable and restart auto-negotiation. */
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
control |= (IEEE_CTRL_AUTONEGOTIATE_ENABLE |
IEEE_STAT_AUTONEGOTIATE_RESTART);
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
control);
/* Read PHY control and status registers is successful. */
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_STATUS_REG_OFFSET,
&status);
if ((control & IEEE_CTRL_AUTONEGOTIATE_ENABLE) &&
(status & IEEE_STAT_AUTONEGOTIATE_CAPABLE)) {
/* Poll until auto-negotiation completes (no timeout). */
while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_STATUS_REG_OFFSET,
&status);
}
/* Decode the partner's advertised abilities, fastest first. */
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_PARTNER_ABILITIES_1_REG_OFFSET,
&partner_capabilities);
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_PARTNER_ABILITIES_3_REG_OFFSET,
&partner_capabilities_1000);
if (partner_capabilities_1000 & IEEE_AN3_ABILITY_MASK_1GBPS) return 1000;
}
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_100MBPS) return 100;
if (partner_capabilities & IEEE_AN1_ABILITY_MASK_10MBPS) return 10;
xil_printf("%s: unknown PHY link speed, setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__);
return 10;
} else {
/* Update TEMAC speed accordingly */
if (status & IEEE_STAT_1GBPS_EXTENSIONS) {
/* Get commanded link speed */
phylinkspeed = control & IEEE_CTRL_1GBPS_LINKSPEED_MASK;
switch (phylinkspeed) {
case (IEEE_CTRL_LINKSPEED_1000M):
return 1000;
case (IEEE_CTRL_LINKSPEED_100M):
return 100;
case (IEEE_CTRL_LINKSPEED_10M):
return 10;
default:
xil_printf("%s: unknown PHY link speed (%d), setting TEMAC speed to be 10 Mbps\r\n",
__FUNCTION__, phylinkspeed);
return 10;
}
} else {
return (control & IEEE_CTRL_LINKSPEED_MASK) ? 100 : 10;
}
}
}
/*
 * Force the EmacLite's PHY to a fixed link speed (100 or 10 Mbps).
 *
 * Clears the speed-select bits in the PHY control register, sets the bit
 * for the requested speed, restricts auto-negotiation advertisements to
 * that single speed, then resets the PHY and busy-waits briefly for it to
 * come back. Speeds other than 100/10 leave the speed bits cleared.
 * Always returns 0.
 */
unsigned configure_IEEE_phy_speed_emaclite(XEmacLite *xemaclitep, unsigned speed)
{
u16 control;
u32 phy_addr = detect_phy_emaclite(xemaclitep);
XEmacLite_PhyRead(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
&control);
control &= ~IEEE_CTRL_LINKSPEED_100M;
control &= ~IEEE_CTRL_LINKSPEED_10M;
if (speed == 100) {
control |= IEEE_CTRL_LINKSPEED_100M;
/* Don't advertise PHY speed of 1000 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 10 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_100);
}
else if (speed == 10) {
control |= IEEE_CTRL_LINKSPEED_10M;
/* Don't advertise PHY speed of 1000 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_1000_ADVERTISE_REG_OFFSET,
0);
/* Don't advertise PHY speed of 100 Mbps */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_AUTONEGO_ADVERTISE_REG,
ADVERTISE_10);
}
/* Write the new speed and reset the PHY in one operation. */
XEmacLite_PhyWrite(xemaclitep, phy_addr,
IEEE_CONTROL_REG_OFFSET,
control | IEEE_CTRL_RESET_MASK);
/* Crude busy-wait for the PHY to come out of reset. */
{
volatile int wait;
for (wait=0; wait < 100000; wait++);
for (wait=0; wait < 100000; wait++);
}
return 0;
}
/*
* xemacliteif_init():
*
* Should be called at the beginning of the program to set up the
* network interface. It calls the function low_level_init() to do the
* actual setup of the hardware.
*
*/
/*
 * lwIP netif init callback for an EmacLite interface: fills in the netif
 * function pointers / name, then performs the hardware setup via
 * low_level_init(). netif->state must hold the EmacLite base address on
 * entry. Returns ERR_OK (low_level_init's ERR_MEM result is not propagated
 * here, matching the original behavior).
 */
err_t
xemacliteif_init(struct netif *netif)
{
#if LWIP_SNMP
/* ifType ethernetCsmacd(6) @see RFC1213 */
netif->link_type = 6;
/* Fix: the original read `netif->link_speed = ;` (a template placeholder),
 * which does not compile when LWIP_SNMP is enabled. EmacLite tops out at
 * 100 Mbps; SNMP ifSpeed is expressed in bits per second. */
netif->link_speed = 100000000;
netif->ts = 0;
netif->ifinoctets = 0;
netif->ifinucastpkts = 0;
netif->ifinnucastpkts = 0;
netif->ifindiscards = 0;
netif->ifoutoctets = 0;
netif->ifoutucastpkts = 0;
netif->ifoutnucastpkts = 0;
netif->ifoutdiscards = 0;
#endif
netif->name[0] = IFNAME0;
netif->name[1] = IFNAME1;
netif->output = xemacliteif_output;
netif->linkoutput = low_level_output;
low_level_init(netif);
#if !NO_SYS
/* Arm the periodic ARP cache timer (RTOS builds only). */
sys_timeout(ARP_TMR_INTERVAL, arp_timer, NULL);
#endif
return ERR_OK;
}
#ifndef XLWIP_CONFIG_INCLUDE_EMACLITE_ON_ZYNQ
#if XPAR_INTC_0_HAS_FAST == 1
/****************** Fast Interrupt Handler **********************************/
/* AXI INTC "fast" handlers take no argument, so this trampoline forwards to
 * the normal interrupt handler using the instance pointer stashed in the
 * xemaclitep_fast global during low_level_init(). */
void XEmacLite_FastInterruptHandler (void)
{
XEmacLite_InterruptHandler((void *)xemaclitep_fast);
}
#endif
#endif

View File

@ -0,0 +1,27 @@
# lwIP build for the Xilinx (Zynq) target: core stack sources plus the
# Xilinx netif adapter and FreeRTOS port, exposed as the lwip_xil library.
set (LWIP_INCLUDE_DIRS_XIL
"${LWIP_DIR}/src/include"
"${LWIP_DIR}/src/include/compat/posix"
"include"
)
# NOTE(review): the lwip*_SRCS variables below are populated by lwIP's
# src/Filelists.cmake — confirm it is included before this file is processed.
set(lwip_SRCS_XIL
${lwipcore_SRCS}
${lwipcore4_SRCS}
${lwipcore6_SRCS}
${lwipnetif_SRCS}
${lwipapi_SRCS}
#${LWIP_DIR}/contrib/ports/freertos/sys_arch.c
#${LWIP_DIR}/src/netif/slipif.c
#${LWIP_DIR}/src/apps/tftp/tftp.c
)
add_library(lwip_xil ${lwip_SRCS_XIL})
target_include_directories(lwip_xil PUBLIC ${LWIP_INCLUDE_DIRS_XIL})
target_sources(lwip_xil PRIVATE xethernet.c)
# netif/ and port/ add their sources to lwip_xil via target_sources().
add_subdirectory(netif)
add_subdirectory(port)

View File

@ -0,0 +1,25 @@
#pragma once
/* lwIP architecture/compiler adaptation header ("cc.h") for this port:
 * platform types, struct packing, RNG, and diagnostic macros. */
#include <stdio.h>
#include <stdlib.h>
#include "lwipopts.h"
/** if you want to use the struct timeval provided
 * by your system, set this to 0 and include <sys/time.h> in cc.h */
#define LWIP_TIMEVAL_PRIVATE 0
#include <sys/time.h>
// errno is a macro. If we define LWIP_ERRNO_INCLUDE to errno.h the preprocessor will replace it,
// breaking the include. Instead we supply a helper include which in turn includes errno.h
#define LWIP_ERRNO_INCLUDE <onrre.h>
/* NOTE(review): rand() is not cryptographically strong — acceptable for
 * lwIP's port/sequence randomization only; confirm no security use. */
#define LWIP_RAND rand
/* GCC packed-struct spelling expected by lwIP's protocol headers. */
#define PACK_STRUCT_FIELD(x) x
#define PACK_STRUCT_STRUCT __attribute__((packed))
#define PACK_STRUCT_BEGIN
#define PACK_STRUCT_END
/* Assertions are compiled out; diagnostics go to printf. */
#define LWIP_PLATFORM_ASSERT(x)
#define LWIP_PLATFORM_DIAG(x) do { printf x; } while(0)

View File

@ -0,0 +1,5 @@
/* Custom lwIP heap-via-pools configuration (included by memp.c when
 * MEM_USE_POOLS is enabled): 50 buffers each of 256, 512 and 1550 bytes,
 * the largest sized to hold a full Ethernet frame. */
LWIP_MALLOC_MEMPOOL_START
LWIP_MALLOC_MEMPOOL(50, 256)
LWIP_MALLOC_MEMPOOL(50, 512)
LWIP_MALLOC_MEMPOOL(50, 1550)
LWIP_MALLOC_MEMPOOL_END

View File

@ -0,0 +1,2 @@
#pragma once
/* Indirection target for lwIP's LWIP_ERRNO_INCLUDE: `errno` is itself a
 * macro, so naming <errno.h> directly in that define would be mangled by
 * the preprocessor. This wrapper (note the scrambled name) includes it
 * safely. */
#include <errno.h>

View File

@ -1,4 +1,4 @@
target_sources(lwip PRIVATE
target_sources(lwip_xil PRIVATE
xadapter.c
xpqueue.c
xemacpsif_dma.c
@ -8,5 +8,5 @@ target_sources(lwip PRIVATE
topology.c
)
target_include_directories(lwip PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(lwip_xil PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})

View File

@ -1,4 +1,4 @@
target_sources(lwip PRIVATE
target_sources(lwip_xil PRIVATE
sys_arch.c
sys_arch_raw.c
)

View File

@ -56,8 +56,6 @@ void mission(void);
void initFreeRTOSHelper();
void testEth();
int main(void) {
/* Configure the hardware ready to run the demo. */
@ -65,9 +63,9 @@ int main(void) {
// printf("Booting Software\n");
// testEth();
//testEth();
mission();
mission();
}
static void prvSetupHardware(void) {

View File

@ -1 +1 @@
target_sources(${TARGET_NAME} PRIVATE close.c read.c write.c)
target_sources(bsp PUBLIC close.c read.c write.c)

View File

@ -4,4 +4,7 @@ add_subdirectory(libsrc/scugic)
add_subdirectory(libsrc/scutimer)
add_subdirectory(libsrc/scuwdt)
add_subdirectory(libsrc/standalone)
add_subdirectory(libsrc/uartps)
add_subdirectory(libsrc/uartps)
target_include_directories(
bsp PUBLIC include)

View File

@ -1,4 +1,4 @@
target_sources(${TARGET_NAME} PUBLIC
target_sources(bsp PUBLIC
src/xemacps_bdring.c
src/xemacps.c
src/xemacps_control.c

View File

@ -1,4 +1,4 @@
target_sources(${TARGET_NAME} PUBLIC
target_sources(bsp PUBLIC
src/xgpiops_g.c
src/xgpiops_hw.c
src/xgpiops_intr.c

View File

@ -1,4 +1,4 @@
target_sources(${TARGET_NAME} PUBLIC
target_sources(bsp PUBLIC
src/xscugic_g.c
src/xscugic.c
src/xscugic_intr.c

View File

@ -1,4 +1,4 @@
target_sources(${TARGET_NAME} PUBLIC
target_sources(bsp PUBLIC
src/xscutimer.c
src/xscutimer_sinit.c
src/xscutimer_g.c

View File

@ -1,4 +1,4 @@
target_sources(${TARGET_NAME} PUBLIC
target_sources(bsp PUBLIC
src/xscuwdt_sinit.c
src/xscuwdt_selftest.c
src/xscuwdt_g.c

View File

@ -1,4 +1,4 @@
target_sources(${TARGET_NAME} PUBLIC
target_sources(bsp PUBLIC
src/translation_table.S
src/cpu_init.S
src/boot.S

View File

@ -1,4 +1,4 @@
target_sources(${TARGET_NAME} PUBLIC
target_sources(bsp PUBLIC
src/xuartps.c
src/xuartps_hw.c
src/xuartps_intr.c

View File

@ -1 +1,2 @@
target_include_directories(${TARGET_NAME} PRIVATE include)
# TODO directly referencing bsp is not correct
target_include_directories(bsp PUBLIC include)

View File

@ -1 +1,3 @@
target_sources(${TARGET_NAME} PRIVATE mission.c freeRTOS_rust_helper.c)

View File

@ -0,0 +1,7 @@
# Second firmware binary: the simulator-interface image (own mission.c plus
# the Ethernet test driver), linked against the shared BSP, the Xilinx lwIP
# build and the FreeRTOS kernel with the same Zynq-A9 link script as the
# primary image.
add_executable(sim_interface mission.c testEth.c)
# Fix: the map file was named ${TARGET_NAME}.map, i.e. after the PRIMARY
# binary, so both executables would fight over the same map file. Name it
# after this target instead.
target_link_options(sim_interface PRIVATE -Wl,--cref -Wl,-Map=sim_interface.map -mcpu=cortex-a9 -mfpu=vfpv3 -mfloat-abi=hard -Wl,-build-id=none -T${CMAKE_SOURCE_DIR}/bsp_z7/freeRTOS/lscript.ld -specs=${CMAKE_SOURCE_DIR}/bsp_z7/freeRTOS/Xilinx.spec )
target_link_libraries(sim_interface PRIVATE bsp lwip_xil freertos_kernel)
target_link_options(sim_interface PRIVATE "-Wl,--gc-sections")

49
sim_interface/mission.c Normal file
View File

@ -0,0 +1,49 @@
/* Scheduler include files. */
#include "FreeRTOS.h"
#include "semphr.h"
#include "task.h"
int testEth();
/* Entry point of the sim_interface application (called from main after
 * hardware setup): runs the Ethernet test driver. */
void mission(void) {
testEth();
}
/*-----------------------------------------------------------*/
/* FreeRTOS stack-overflow hook: disables interrupts and halts forever.
 * pxTask/pcTaskName identify the offending task but are unused here. */
void vApplicationStackOverflowHook(TaskHandle_t pxTask, char *pcTaskName) {
(void)pcTaskName;
(void)pxTask;
/* Run time stack overflow checking is performed if
configCHECK_FOR_STACK_OVERFLOW is defined to 1 or 2. This hook
function is called if a stack overflow is detected. */
taskDISABLE_INTERRUPTS();
// TODO panic
for (;;)
;
}
/*-----------------------------------------------------------*/
/* FreeRTOS allocation-failure hook: disables interrupts and halts forever. */
void vApplicationMallocFailedHook(void) {
/* Called if a call to pvPortMalloc() fails because there is insufficient
free memory available in the FreeRTOS heap. pvPortMalloc() is called
internally by FreeRTOS API functions that create tasks, queues, software
timers, and semaphores. The size of the FreeRTOS heap is set by the
configTOTAL_HEAP_SIZE configuration constant in FreeRTOSConfig.h. */
taskDISABLE_INTERRUPTS();
for (;;)
;
}
void rust_assert_called(const char *pcFile, unsigned long ulLine);
/* FreeRTOS configASSERT hook: disables interrupts and halts forever.
 * pcFile/ulLine identify the failing assertion site (currently unused;
 * a rust_assert_called bridge is declared but not yet wired in). */
void vAssertCalled(const char *pcFile, unsigned long ulLine) {
/* Fix: silence unused-parameter warnings, consistent with the
 * vApplicationStackOverflowHook hook in this file. */
(void)pcFile;
(void)ulLine;
taskDISABLE_INTERRUPTS();
for (;;)
;
}
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/