/*
Demonstration driver for establishing a network between the
VMK180 and a host PC over CPM. This is the common network
interface device driver.

Elena Zhivun <elena.zhivun@cern.ch>
*/
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
/*
TODO:
- disable tx and rx on the peers without "link" present
- it's possible to make TX queue circular buffer lockless
- use interrupts instead of timer; use NAPI
BUGs:
- first ICMP packet takes 1-2 seconds to arrive?
*/
// MAX_PACKET_LEN is measured in FIFO words
#define MAX_PACKET_LEN 400
#define BYTES_IN_WORD 4
#define MAX_DATA_LEN (MAX_PACKET_LEN * BYTES_IN_WORD)
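/*
A worked sizing example (not from the original): 400 FIFO words * 4 bytes
= 1600 bytes per packet, which leaves room for a full 1500-byte Ethernet
payload plus the 14-byte header and word-alignment padding. A compile-time
check of that assumption:
*/
static inline void flxnet_check_mtu_fits(void) {
	BUILD_BUG_ON(MAX_DATA_LEN < ETH_FRAME_LEN); /* 1600 >= 1514 */
}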
#define MAX_RETRY_ATTEMPTS 5
int tx_queue_size = 256;
DEFINE_SPINLOCK(tx_queue_spinlock);
struct tx_queue tx_queue; //[MAXCARDS]
static LIST_HEAD(peers);
struct mutex peer_mutex;
atomic_t peer_count;
wait_queue_head_t recv_queue;
struct task_struct *recv_task;
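/*
Several types and globals used throughout this excerpt are defined
elsewhere in the driver and are not shown. A minimal sketch follows so the
code below reads self-contained; the field names mirror their uses in this
file, but the exact layouts are assumptions. The hardware-specific
constants (POLL_PERIOD_MS, TX_TIMEOUT_MS, RX_TIMEOUT_MS, DRV_NAME,
FLX_DEV_MAGIC, the *_MAGIC_REG offsets, and the status-register bit
positions such as WORD_AVAILABLE) remain as defined elsewhere.
*/
typedef u32 t_fifo_word; // one FIFO word as read from the RECV register

struct tx_packet {
	int num_words;           // payload length in FIFO words
	u8 data[MAX_DATA_LEN];   // word-aligned copy of the skb payload
	u8 dest_mac[ETH_ALEN];   // destination MAC from the Ethernet header
	int attempts;            // send attempts, see MAX_RETRY_ATTEMPTS
};

// tx_queue_size must be a power of two: head/tail are advanced
// with "& (tx_queue_size - 1)" below
struct tx_queue {
	struct tx_packet *packets; // ring buffer of tx_queue_size entries
	int head;                  // producer index (hard_start_xmit)
	int tail;                  // consumer index (worker thread)
};

struct flxnet_peer {
	struct list_head list_head; // linkage in the global "peers" list
	void __iomem *regs;         // MMAPped register window of this peer
	unsigned int reg_status;    // offsets of the status/send/recv registers
	unsigned int reg_send;
	unsigned int reg_recv;
	u8 mac_addr[ETH_ALEN];      // learned from received packets, see update_mac()
	bool reserved;              // owner module pinned via try_module_get()
	struct module *owner;       // module that supplied this peer
};

static struct net_device *flxnet; // the single network interface
static u32 message_level;         // NETIF_MSG_* bitmask, see ethtool msglvl
static struct timer_list recv_timer;
static DEFINE_MUTEX(flxnet_mutex);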
#define FLXNET_SOP 0xCACE0000
#define FLXNET_SOP_MASK 0xFFFF0000
#define FLXNET_LEN_MASK 0x0000FFFF
#define FLXNET_EOP 0xFA18CECA
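/*
On-the-wire framing, as parsed by rx_all_packets() below (the example
values are illustrative, not from the original): each packet is a SOP word
carrying the payload length in FIFO words, the payload itself, then a
fixed EOP trailer. A 5-word payload would appear as:

	0xCACE0005   SOP: FLXNET_SOP | length (5 words)
	<5 payload words>
	0xFA18CECA   EOP trailer (FLXNET_EOP)
*/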
#define RX_SUCCESS 0
#define RX_LOW_BUFFER 1
#define RX_INVALID_ARG 2
#define RX_TIMEOUT 3
#define RX_INVALID_EOP 4
#define RX_NO_MEMORY 5
#define RX_TRUNCATED 6
#define TX_SUCCESS 0
#define TX_TRY_BROADCAST 1
#define TX_BUSY 2
#define TX_ERROR 3 /* used below but not defined in this excerpt; value assumed */
/* Read/Write access to the MMAPped registers */
#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86)
# define register_read(offset) readl(offset)
# define register_write(offset, val) writel(val, offset)
#else
# define register_read(offset) __raw_readl(offset)
# define register_write(offset, val) __raw_writel(val, offset)
#endif
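/*
Background note on the two variants (not from the original): readl() and
writel() imply memory barriers and little-endian byte order, while the
__raw_* forms are unordered, native-endian accesses; the #if above selects
the stronger-ordered accessors on Zynq and x86 builds.
*/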
static inline bool check_flag(void __iomem *reg, int bit_id) {
	return ((1 << bit_id) & register_read(reg)) != 0;
}
static inline void set_flag(void __iomem *reg, int bit_id) {
	uint32_t temp_reg = register_read(reg);
	temp_reg |= (1 << bit_id);
	pr_info("flxnet_dev: temp_reg is %u\n", temp_reg);
	register_write(reg, temp_reg);
	pr_info("flxnet_dev: flag set\n");
}
static inline void clear_flag(void __iomem *reg, int bit_id) {
	uint32_t temp_reg = register_read(reg);
	temp_reg &= ~(1 << bit_id);
	pr_info("flxnet_dev: temp_reg is %u\n", temp_reg);
	register_write(reg, temp_reg);
	pr_info("flxnet_dev: flag cleared\n");
}
/* static inline void print_status_reg(struct flxnet_peer *peer) {
	uint32_t temp_reg = register_read(peer->regs + peer->reg_status);
	pr_info("flxnet_dev: SEND_EOP: %d\nRECV_EOP: %d\nROOM_FOR_BLOCK: %d\nROOM_FOR_WORD: %d\nBLOCK_AVAILABLE: %d\nWORD_AVAILABLE: %d\n",
		(temp_reg & (1 << SEND_IS_EOP)) != 0,
		(temp_reg & (1 << RECV_IS_EOP)) != 0,
		(temp_reg & (1 << HAS_ROOM_FOR_BLOCK)) != 0,
		(temp_reg & (1 << HAS_ROOM_FOR_WORD)) != 0,
		(temp_reg & (1 << BLOCK_AVAILABLE)) != 0,
		(temp_reg & (1 << WORD_AVAILABLE)) != 0);
} */
static inline bool is_word_available(struct flxnet_peer *peer) {
	return check_flag(peer->regs + peer->reg_status, WORD_AVAILABLE);
}
static inline bool is_block_available(struct flxnet_peer *peer) {
	return check_flag(peer->regs + peer->reg_status, BLOCK_AVAILABLE);
}
static inline bool can_accept_word(struct flxnet_peer *peer) {
	return check_flag(peer->regs + peer->reg_status, HAS_ROOM_FOR_WORD);
}
static inline bool can_accept_block(struct flxnet_peer *peer) {
	return check_flag(peer->regs + peer->reg_status, HAS_ROOM_FOR_BLOCK);
}
static inline bool recv_eop(struct flxnet_peer *peer) {
	return check_flag(peer->regs + peer->reg_status, RECV_IS_EOP);
}
// Send a single word to the device
static inline void send_word(struct flxnet_peer *peer, u32 data) {
	register_write(peer->regs + peer->reg_send, data);
}
// Receive a single word
static inline t_fifo_word recv_word(struct flxnet_peer *peer) {
	return (t_fifo_word)register_read(peer->regs + peer->reg_recv);
}
// Send the end-of-packet marker
static inline void send_eop(struct flxnet_peer *peer) {
	set_flag(peer->regs + peer->reg_status, SEND_IS_EOP);
	send_word(peer, FLXNET_EOP);
	pr_info("flxnet_dev: end of packet sent\n");
	clear_flag(peer->regs + peer->reg_status, SEND_IS_EOP);
}
// Verify that the device magic number is correct; the host and target
// builds check different magic registers (the original build-time switch
// is not shown in this excerpt, FLXNET_TARGET is a hypothetical stand-in)
static inline bool is_magic_correct(struct flxnet_peer *peer) {
#ifdef FLXNET_TARGET
	return (register_read(peer->regs + TARGET_MAGIC_REG) == FLX_DEV_MAGIC);
#else
	return (register_read(peer->regs + HOST_MAGIC_REG) == FLX_DEV_MAGIC);
#endif
}
/*
Removes the peer from the network, assumes that peer_mutex is held.
(The list removal and peer_count bookkeeping are assumptions based on
how the function is used below; only the log lines and kfree survive
in this excerpt.)
*/
void flxnet_remove_peer(struct flxnet_peer *peer) {
	pr_info("flxnet_dev: removing peer %p from the network\n", peer);
	list_del(&peer->list_head);
	if (atomic_dec_return(&peer_count) < 0)
		pr_err("flxnet_dev: BUG in %s: peer_count = %d, less than 0\n",
			__FUNCTION__, atomic_read(&peer_count));
	kfree(peer);
}
// Removes all peers from the network
void flxnet_remove_all_peers(void) {
	struct flxnet_peer *peer, *tmp;
	pr_info("flxnet_dev: removing all peers from the network\n");
	mutex_lock(&peer_mutex);
	list_for_each_entry_safe(peer, tmp, &peers, list_head)
		flxnet_remove_peer(peer);
	mutex_unlock(&peer_mutex);
}
// mark as not reserved so that peer modules can be removed
void flxnet_release_all_peers(void) {
	struct flxnet_peer *peer;
	struct flxnet_peer *tmp;
	mutex_lock(&peer_mutex);
	list_for_each_entry_safe(peer, tmp, &peers, list_head) {
		if (peer->reserved) {
			module_put(peer->owner);
			peer->reserved = false;
		} else {
			pr_info("flxnet_dev: trying to release peer %p, which is not reserved, skipping\n", peer);
		}
	}
	mutex_unlock(&peer_mutex);
}
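/*
Peers are supplied by other modules, but the registration call is not part
of this excerpt. A hypothetical sketch of the counterpart to
flxnet_remove_peer(), showing how the list, owner, and peer_count are
presumably populated (flxnet_add_peer and its signature are assumptions):
*/
int flxnet_add_peer(void __iomem *regs, struct module *owner) {
	struct flxnet_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);
	if (!peer)
		return -ENOMEM;
	peer->regs = regs;
	peer->owner = owner;             // pinned later by flxnet_reserve_all_peers()
	eth_random_addr(peer->mac_addr); // real MAC is learned by update_mac()
	mutex_lock(&peer_mutex);
	list_add_tail(&peer->list_head, &peers);
	atomic_inc(&peer_count);
	mutex_unlock(&peer_mutex);
	return 0;
}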
// mark as reserved so that peer modules cannot be removed and the iomem stays there
void flxnet_reserve_all_peers(void) {
	struct flxnet_peer *peer;
	struct flxnet_peer *tmp;
	mutex_lock(&peer_mutex);
	list_for_each_entry_safe(peer, tmp, &peers, list_head) {
		if (!peer->reserved) {
			peer->reserved = try_module_get(peer->owner);
			if (!peer->reserved)
				pr_err("flxnet_dev: failed to reserve peer %p, owner module is missing\n", peer);
		} else {
			pr_info("flxnet_dev: trying to reserve peer %p, which is already reserved, skipping\n", peer);
		}
	}
	mutex_unlock(&peer_mutex);
}
static bool tx_queue_is_full(void) {
	bool rv;
	unsigned long flags;
	spin_lock_irqsave(&tx_queue_spinlock, flags);
	rv = CIRC_SPACE(tx_queue.head, tx_queue.tail, tx_queue_size) == 0;
	spin_unlock_irqrestore(&tx_queue_spinlock, flags);
	return rv;
}
static bool tx_queue_is_empty(void) {
	bool rv;
	unsigned long flags;
	spin_lock_irqsave(&tx_queue_spinlock, flags);
	rv = CIRC_CNT(tx_queue.head, tx_queue.tail, tx_queue_size) == 0;
	spin_unlock_irqrestore(&tx_queue_spinlock, flags);
	return rv;
}
// Copy the skb payload into the ring buffer (the flags/mac_hdr
// declarations and return codes are reconstructed around the surviving lines)
static int tx_queue_try_push(struct sk_buff *skb) {
	struct tx_packet *packet;
	struct ethhdr *mac_hdr = eth_hdr(skb);
	unsigned long flags;
	if (tx_queue_is_full()) {
		if (message_level & NETIF_MSG_TX_ERR)
			pr_info("flxnet_dev: trying to add a packet to TX queue, but it is full\n");
		return TX_BUSY;
	}
	if (skb->len > MAX_DATA_LEN) {
		pr_err("flxnet_dev: (BUG) TX packet payload is too large "
			"(%d bytes, maximum is %d)\n", skb->len, MAX_DATA_LEN);
		return TX_ERROR;
	}
	spin_lock_irqsave(&tx_queue_spinlock, flags);
	packet = &tx_queue.packets[tx_queue.head];
	packet->num_words = skb->len / BYTES_IN_WORD;
	memcpy(packet->data, skb->data, skb->len);
	if (skb->len % BYTES_IN_WORD) { // zero-pad the trailing partial word
		memset(packet->data + skb->len, 0, BYTES_IN_WORD - (skb->len % BYTES_IN_WORD));
		packet->num_words += 1;
	}
	memcpy(packet->dest_mac, mac_hdr->h_dest, ETH_ALEN);
	packet->attempts = 0;
	tx_queue.head = (tx_queue.head + 1) & (tx_queue_size - 1);
	spin_unlock_irqrestore(&tx_queue_spinlock, flags);
	return TX_SUCCESS;
}
// Discard the oldest queued packet
static void tx_queue_pop(void) {
	unsigned long flags;
	if (tx_queue_is_empty()) {
		pr_warn("flxnet_dev: trying to remove a packet from TX queue, but it is empty\n");
		return;
	}
	spin_lock_irqsave(&tx_queue_spinlock, flags);
	tx_queue.tail = (tx_queue.tail + 1) & (tx_queue_size - 1);
	spin_unlock_irqrestore(&tx_queue_spinlock, flags);
}
// Drop everything still queued, counting the packets as TX drops
static void tx_queue_reset(void) {
	unsigned long flags;
	spin_lock_irqsave(&tx_queue_spinlock, flags);
	flxnet->stats.tx_dropped += CIRC_CNT(tx_queue.head, tx_queue.tail, tx_queue_size);
	tx_queue.head = tx_queue.tail = 0;
	spin_unlock_irqrestore(&tx_queue_spinlock, flags);
}
// Peek at the oldest queued packet, or NULL if the queue is empty
static struct tx_packet *tx_queue_get_next(void) {
	struct tx_packet *result;
	unsigned long flags;
	if (tx_queue_is_empty())
		return NULL;
	spin_lock_irqsave(&tx_queue_spinlock, flags);
	result = &tx_queue.packets[tx_queue.tail];
	spin_unlock_irqrestore(&tx_queue_spinlock, flags);
	return result;
}
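/*
The TODO above notes the TX ring could be made lockless. Since there is
exactly one producer (hard_start_xmit) and one consumer (the worker
thread), the spinlock could be replaced with the acquire/release pattern
from Documentation/core-api/circular-buffers.rst. A sketch under that
single-producer/single-consumer assumption (not part of the driver):
*/
static int tx_queue_try_push_lockless(struct tx_packet *src) {
	int head = tx_queue.head;
	int tail = smp_load_acquire(&tx_queue.tail); // pairs with consumer's release
	if (CIRC_SPACE(head, tail, tx_queue_size) == 0)
		return TX_BUSY;
	tx_queue.packets[head] = *src;               // fill the slot first...
	smp_store_release(&tx_queue.head, (head + 1) & (tx_queue_size - 1)); // ...then publish
	return TX_SUCCESS;
}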
// Waits for the EOP word; returns true if it is received correctly,
// false otherwise (the timeout handling here is an assumption)
bool is_valid_eop(struct flxnet_peer *peer, unsigned long timeout) {
	t_fifo_word eop;
	while (true) {
		if (is_word_available(peer)) {
			eop = recv_word(peer);
			break;
		}
		if (time_after(jiffies, timeout))
			return false;
	}
	return eop == FLXNET_EOP;
}
// check the mac address of the received packet
// and update the peer mac if valid
void update_mac(struct flxnet_peer *peer, struct sk_buff *skb) {
	struct ethhdr *mac_hdr = eth_hdr(skb);
	if (!is_valid_ether_addr(mac_hdr->h_source)) {
		pr_info("flxnet_dev: mac address of received packet is invalid\n");
		return;
	}
	if (memcmp(peer->mac_addr, mac_hdr->h_source, ETH_ALEN) != 0) {
		pr_info("flxnet_dev: updating peer mac address: was %pM, new %pM\n",
			peer->mac_addr, mac_hdr->h_source);
		memcpy(peer->mac_addr, mac_hdr->h_source, ETH_ALEN);
	}
}
// There is no memory to create an skb, so dequeue the packet words from
// the FIFO and drop them (the length check and timeout handling are
// reconstructed; RX_TIMEOUT_MS is a hypothetical constant)
int drop_rx_packet(struct flxnet_peer *peer, int length_words) {
	int num_words;
	unsigned long timeout;
	if (length_words <= 0) {
		pr_err("flxnet_dev: attempting to drop 0-length packet\n");
		return RX_INVALID_ARG;
	}
	timeout = jiffies + msecs_to_jiffies(RX_TIMEOUT_MS);
	num_words = 0;
	while (true) {
		if (is_word_available(peer)) {
			recv_word(peer);
			num_words += 1;
			if (num_words == length_words)
				goto packet_rx_done;
		}
		if (time_after(jiffies, timeout))
			return RX_TIMEOUT;
	}
packet_rx_done:
	if (num_words != length_words)
		return RX_TRUNCATED;
	if (!is_valid_eop(peer, timeout))
		return RX_INVALID_EOP;
	// the packet itself was still dropped for lack of memory
	return RX_NO_MEMORY;
}
// Read one packet from the FIFO and hand it to the network stack
// (declarations, the receive loop bounds, the stats updates, and the
// pre-5.14 netif_rx_ni() branch are reconstructed around the surviving lines)
int recv_packet(struct flxnet_peer *peer, int length_words) {
	struct sk_buff *skb;
	int num_words;
	t_fifo_word rx_word;
	unsigned long timeout;
	bool low_buffer = false; // original condition for RX_LOW_BUFFER is not shown
	if ((length_words <= 0) || (length_words > MAX_PACKET_LEN)) {
		pr_err("flxnet_dev: invalid packet length (%d words)\n", length_words);
		return RX_INVALID_ARG;
	}
	skb = dev_alloc_skb(length_words * BYTES_IN_WORD);
	if (skb == NULL) {
		pr_err("flxnet_dev: dropped RX packet, low memory\n");
		drop_rx_packet(peer, length_words);
		return RX_NO_MEMORY;
	}
	if (message_level & NETIF_MSG_PKTDATA)
		pr_info("flxnet_dev: received packet data:\n");
	timeout = jiffies + msecs_to_jiffies(RX_TIMEOUT_MS); // hypothetical constant
	num_words = 0;
	while (true) {
		if (is_word_available(peer)) {
			rx_word = recv_word(peer);
			memcpy(skb_put(skb, BYTES_IN_WORD), &rx_word, BYTES_IN_WORD);
			if (message_level & NETIF_MSG_PKTDATA)
				pr_info("%08X\n", rx_word);
			num_words += 1;
			if (num_words == length_words)
				goto packet_rx_done;
		}
		if (time_after(jiffies, timeout)) {
			dev_kfree_skb(skb);
			pr_err("flxnet_dev: error receiving packet, timeout\n");
			return RX_TIMEOUT;
		}
	}
packet_rx_done:
	if (num_words != length_words) {
		dev_kfree_skb(skb);
		pr_err_ratelimited("flxnet_dev: error receiving packet, missing data\n");
		return RX_TRUNCATED;
	}
	if (!is_valid_eop(peer, timeout)) {
		dev_kfree_skb(skb);
		pr_err_ratelimited("flxnet_dev: received a packet with invalid trailer\n");
		return RX_INVALID_EOP;
	}
	skb->protocol = eth_type_trans(skb, flxnet);
	update_mac(peer, skb); // after eth_type_trans() has set the MAC header
	if (message_level & NETIF_MSG_RX_STATUS)
		pr_info("flxnet_dev: received packet (%d bytes, %d words) from peer %p\n",
			skb->len, num_words, peer);
	flxnet->stats.rx_packets += 1;
	flxnet->stats.rx_bytes += num_words * BYTES_IN_WORD;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)
	netif_rx(skb);
#else
	netif_rx_ni(skb);
#endif
	if (low_buffer)
		return RX_LOW_BUFFER;
	else
		return RX_SUCCESS;
}
/*
Timer to wake up FIFO polling thread periodically
*/
void recv_timer_function(struct timer_list *timer_list) {
	wake_up_interruptible(&recv_queue);
	mod_timer(timer_list, jiffies + msecs_to_jiffies(POLL_PERIOD_MS));
}
// returns true if the packet can be received by at least one peer;
// if there are no peers, packets can be marked as "sent" and
// dropped immediately
bool can_transmit_packets(void) {
	struct flxnet_peer *peer;
	if (atomic_read(&peer_count) == 0)
		return true; // no peers: packets are "sent" by dropping them
	mutex_lock(&peer_mutex);
	list_for_each_entry(peer, &peers, list_head) {
		if (is_magic_correct(peer) && can_accept_block(peer)) {
			mutex_unlock(&peer_mutex);
			return true;
		}
	}
	mutex_unlock(&peer_mutex);
	return false;
}
/*
Iterate over all peers and retrieve all packets, under the assumption
that the CPU can drain them faster than the FIFO fills up
*/
void rx_all_packets(void) {
	struct flxnet_peer *peer;
	struct flxnet_peer *tmp;
	t_fifo_word sop;
	int length_words, rv;
	mutex_lock(&peer_mutex);
	list_for_each_entry_safe(peer, tmp, &peers, list_head) {
		if (!peer->reserved) continue;
		if (!is_magic_correct(peer)) {
			pr_err("flxnet_dev: invalid magic in %s, removing peer %p from the network\n",
				__FUNCTION__, peer);
			flxnet_remove_peer(peer);
			continue;
		}
		while (is_block_available(peer)) {
			if (message_level & NETIF_MSG_RX_STATUS)
				pr_info("flxnet_dev: peer %p has data block available\n", peer);
			sop = recv_word(peer);
			if ((sop & FLXNET_SOP_MASK) != FLXNET_SOP) {
				if (message_level & NETIF_MSG_RX_ERR)
					pr_err("flxnet_dev: invalid SOP: %#010X\n", sop);
				flxnet->stats.rx_frame_errors += 1;
				continue;
			}
			// found the beginning of a packet
			length_words = sop & FLXNET_LEN_MASK;
			rv = recv_packet(peer, length_words);
			if (rv == RX_TIMEOUT || rv == RX_LOW_BUFFER) {
				// receiving this packet took too long;
				// move on to the next peer
				if (message_level & NETIF_MSG_RX_STATUS)
					pr_err("flxnet_dev: low buffer on peer %p, abort reading\n", peer);
				break;
			} else if (rv == RX_NO_MEMORY) {
				mutex_unlock(&peer_mutex);
				pr_err("flxnet_dev: RX thread is out of memory\n");
				return;
			}
		}
	}
	mutex_unlock(&peer_mutex);
}
/*
Background task for polling the FIFOs and reading out packets.
I don't want these in NAPI poll because this can be slow and
hence potentially interfere with other network devices.
(The signature, wait declaration, and the rx_all_packets() call are
reconstructed; the queue-wake condition is an assumption.)
*/
int send_recv_thread(void *arg) {
	DEFINE_WAIT(wait);
	pr_info("flxnet_dev: entered FIFO polling thread\n");
	while (!kthread_should_stop()) {
		if (tx_queue_is_empty()) {
			prepare_to_wait(&recv_queue, &wait, TASK_INTERRUPTIBLE);
			schedule();
			finish_wait(&recv_queue, &wait);
		}
		// pr_info("flxnet: wake up worker thread %s\n", __FUNCTION__);
		rx_all_packets();
		tx_all_packets();
		if (netif_queue_stopped(flxnet) && can_transmit_packets()) {
			pr_info("flxnet_dev: wake up net if queue (%s)\n", __FUNCTION__);
			netif_wake_queue(flxnet);
		}
	}
	pr_info("flxnet_dev: stopping %s\n", __FUNCTION__);
	return 0;
}
int open(struct net_device *dev) {
	flxnet_reserve_all_peers(); // reserve the modules that supplied the peers
	tx_queue_reset(); // clear TX queue
	pr_info("flxnet_dev: initialize RX thread and timer\n");
	recv_task = kthread_create(send_recv_thread, NULL, "flxnet RX thread");
	if (IS_ERR(recv_task)) { // error path reconstructed
		pr_err("flxnet_dev: failed to create RX thread in %s\n", __FUNCTION__);
		return PTR_ERR(recv_task);
	}
	wake_up_process(recv_task);
	mod_timer(&recv_timer, jiffies + msecs_to_jiffies(POLL_PERIOD_MS));
	pr_info("flxnet_dev: start netif queue\n");
	netif_start_queue(dev);
	return 0;
}
int stop(struct net_device *dev) {
	mutex_lock(&flxnet_mutex);
	pr_info("flxnet_dev: stop RX thread\n");
	wake_up_interruptible(&recv_queue);
	kthread_stop(recv_task);
	del_timer_sync(&recv_timer);
	pr_info("flxnet_dev: stop netif queue\n");
	netif_stop_queue(dev);
	tx_queue_reset(); // clear TX queue
	flxnet_release_all_peers(); // free the modules that supplied the peers
	mutex_unlock(&flxnet_mutex);
	return 0;
}
static inline void drop_this_packet(void) {
	tx_queue_pop();
	flxnet->stats.tx_dropped += 1;
}
int transmit_packet(struct flxnet_peer *peer, struct tx_packet *packet) {
	t_fifo_word data;
	int i, num_words = packet->num_words;
	if (!is_magic_correct(peer)) { // guard reconstructed around the log line
		pr_err("flxnet_dev: invalid magic in %s, removing peer %p from the network,"
			" and dropping the packet\n", __FUNCTION__, peer);
		flxnet_remove_peer(peer);
		drop_this_packet();
		return TX_ERROR;
	}
	if (!can_accept_block(peer))
		return TX_BUSY;
	if (message_level & NETIF_MSG_PKTDATA)
		pr_info("flxnet_dev: transmitting data:\n");
	send_word(peer, FLXNET_SOP | num_words);
	for (i = 0; i < num_words; i++) {
		memcpy(&data, &packet->data[i * BYTES_IN_WORD], BYTES_IN_WORD);
		send_word(peer, data);
		if (message_level & NETIF_MSG_PKTDATA)
			pr_info("%08x\n", data);
	}
	send_eop(peer);
	if (message_level & NETIF_MSG_TX_DONE)
		pr_info("flxnet_dev: sent packet (%d words) to peer %p\n", num_words, peer);
	flxnet->stats.tx_packets += 1;
	flxnet->stats.tx_bytes += num_words * BYTES_IN_WORD;
	return TX_SUCCESS;
}
// try every reserved peer until one accepts the packet
// (the loop body is reconstructed; the unlock/pop/log lines survive)
int send_broadcast(struct tx_packet *packet) {
	struct flxnet_peer *peer, *tmp;
	int rv = TX_BUSY;
	mutex_lock(&peer_mutex);
	list_for_each_entry_safe(peer, tmp, &peers, list_head) {
		if (peer->reserved && (rv = transmit_packet(peer, packet)) == TX_SUCCESS)
			break;
	}
	mutex_unlock(&peer_mutex);
	if (rv == TX_SUCCESS) tx_queue_pop();
	if ((rv == TX_BUSY) && (message_level & NETIF_MSG_TX_ERR)) {
		pr_info("flxnet_dev: no peer has space in FIFO (total %d peers)\n",
			atomic_read(&peer_count));
	}
	return rv;
}
// send out the latest buffered packet
int send_packet(struct tx_packet *packet) {
	struct flxnet_peer *peer, *tmp;
	int num_peers, rv;
	num_peers = atomic_read(&peer_count);
	if (num_peers == 0) {
		pr_info("flxnet_dev: discard packet - no peers available\n");
		drop_this_packet();
		return TX_SUCCESS;
	}
	packet->attempts += 1;
	if (packet->attempts > MAX_RETRY_ATTEMPTS) {
		pr_info("flxnet_dev: packet %p is dropped after %d attempts\n",
			packet, MAX_RETRY_ATTEMPTS);
		drop_this_packet();
		return TX_SUCCESS;
	}
	if (num_peers == 1 || is_multicast_ether_addr(packet->dest_mac))
		return send_broadcast(packet);
	mutex_lock(&peer_mutex);
	list_for_each_entry_safe(peer, tmp, &peers, list_head) {
		if (!peer->reserved) continue;
		if (memcmp(peer->mac_addr, packet->dest_mac, ETH_ALEN) == 0) {
			// matching peer is found in the table
			rv = transmit_packet(peer, packet);
			mutex_unlock(&peer_mutex);
			if (rv == TX_SUCCESS) tx_queue_pop();
			return rv;
		}
	}
	mutex_unlock(&peer_mutex);
	// matching peer was not found on the list
	return send_broadcast(packet);
}
static void tx_all_packets(void) {
	struct tx_packet *packet;
	while (true) {
		packet = tx_queue_get_next();
		if (packet == NULL) break;
		if (send_packet(packet) == TX_BUSY) break;
	}
}
// queue the packet for the worker thread (push/wake logic reconstructed)
int hard_start_xmit(struct sk_buff *skb, struct net_device *dev) {
	if (tx_queue_try_push(skb) != TX_SUCCESS) {
		pr_info("flxnet_dev: TX packet queue is full\n");
		dev->stats.tx_dropped += 1;
	}
	dev_kfree_skb(skb);
	wake_up_interruptible(&recv_queue); // kick the FIFO polling thread
	return NETDEV_TX_OK;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)
static void tx_timeout(struct net_device *dev, unsigned int txqueue)
#else
static void tx_timeout(struct net_device *dev)
#endif
{
	pr_info("flxnet_dev: wake up netif queue (%s)\n", __FUNCTION__);
	netif_wake_queue(dev);
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	return &dev->stats;
}
static void flx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int flx_get_link_ksettings(struct net_device *dev,
		struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed = SPEED_10;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	return 0;
}
static int flx_set_link_ksettings(struct net_device *dev,
		const struct ethtool_link_ksettings *cmd)
{
	return -EINVAL;
}
static u32 flx_get_link(struct net_device *dev)
{
	return (u32)(atomic_read(&peer_count) != 0);
}
static u32 flx_get_msglevel(struct net_device *dev)
{
	return message_level;
}
static void flx_set_msglevel(struct net_device *dev, u32 v)
{
	message_level = v;
}
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = flx_get_drvinfo,
	.get_link = flx_get_link,
	.get_msglevel = flx_get_msglevel,
	.set_msglevel = flx_set_msglevel,
	.get_link_ksettings = flx_get_link_ksettings,
	.set_link_ksettings = flx_set_link_ksettings,
};
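/*
Usage note (not from the original): with these hooks wired up, the
interface can be inspected with standard ethtool commands, e.g.
"ethtool flxnet0" reports the link as detected whenever at least one peer
is registered, and "ethtool -s flxnet0 msglvl 0xffff" raises
message_level to enable the verbose pr_info paths above.
*/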
static const struct net_device_ops flx_netdev_ops = {
	.ndo_open = open,
	.ndo_stop = stop,
	.ndo_start_xmit = hard_start_xmit,
	.ndo_tx_timeout = tx_timeout,
	.ndo_get_stats = get_stats, /* hookup assumed; get_stats() is otherwise unused */
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
// overlays the 6-byte hardware address with structured fields (the fields
// after "version" are not shown in this excerpt; "serial" is a placeholder)
union zynq_hw_addr {
	char hw_addr[ETH_ALEN];
	struct {
		u32 version;
		u16 serial;
	} __packed;
};
static void __iomem *dst;
// register the network device (the declarations and the netdev allocation
// around the surviving lines are reconstructed)
int register_network(int index) {
	int rv;
	// allocate ring buffer for outgoing packets
	tx_queue.packets = kcalloc(tx_queue_size, sizeof(struct tx_packet), GFP_KERNEL);
	if (tx_queue.packets == NULL) { // kcalloc returns NULL, not ERR_PTR, on failure
		pr_err("flxnet_dev: can't allocate memory for the TX packet queue\n");
		rv = -ENOMEM;
		goto err_alloc_tx_queue;
	}
	flxnet = alloc_etherdev(0);
	if (flxnet == NULL) {
		pr_err("flxnet_dev: can't allocate memory for the network device structure\n");
		rv = -ENOMEM;
		goto err_alloc_netdev;
	}
	snprintf(flxnet->name, IFNAMSIZ, "flxnet%d", index);
	flxnet->netdev_ops = &flx_netdev_ops;
	flxnet->ethtool_ops = &ethtool_ops;
	flxnet->watchdog_timeo = msecs_to_jiffies(TX_TIMEOUT_MS);