#include <net/bluetooth/hci_core.h>
#include "adapter.h"
-#define ESP_IF_TYPE_SDIO 1
-#define ESP_IF_TYPE_SPI 2
+#define ESP_IF_TYPE_SDIO 1
+#define ESP_IF_TYPE_SPI 2
/* Network link status */
-#define ESP_LINK_DOWN 0
-#define ESP_LINK_UP 1
+#define ESP_LINK_DOWN 0
+#define ESP_LINK_UP 1
-#define ESP_MAX_INTERFACE 2
+#define ESP_MAX_INTERFACE 2
-#define ESP_PAYLOAD_HEADER 8
+#define ESP_PAYLOAD_HEADER 8
struct esp_private;
struct esp_adapter;
-#define ACQUIRE_LOCK 1
-#define LOCK_ALREADY_ACQUIRED 0
+#define ACQUIRE_LOCK 1
+#define LOCK_ALREADY_ACQUIRED 0
struct esp_adapter {
- u8 if_type;
- u32 capabilities;
+ u8 if_type;
+ u32 capabilities;
/* Possible types:
- * struct esp_sdio_context */
- void *if_context;
+ * struct esp_sdio_context */
+ void *if_context;
- struct esp_if_ops *if_ops;
+ struct esp_if_ops *if_ops;
/* Private for each interface */
- struct esp_private *priv[ESP_MAX_INTERFACE];
- struct hci_dev *hcidev;
+ struct esp_private *priv[ESP_MAX_INTERFACE];
+ struct hci_dev *hcidev;
- struct workqueue_struct *if_rx_workqueue;
- struct work_struct if_rx_work;
+ struct workqueue_struct *if_rx_workqueue;
+ struct work_struct if_rx_work;
/* Process TX work */
- struct workqueue_struct *tx_workqueue;
- struct work_struct tx_work;
-
- /* TX queue */
- struct sk_buff_head tx_q;
-
- /* RX Queue */
- struct sk_buff_head rx_q;
-
- /* Counters */
- atomic_t tx_pending;
- atomic_t rx_pending;
+ struct workqueue_struct *tx_workqueue;
+ struct work_struct tx_work;
};
struct esp_private {
- struct esp_adapter *adapter;
- struct net_device *ndev;
- struct net_device_stats stats;
- u8 link_state;
- u8 mac_address[6];
- u8 if_type;
- u8 if_num;
+ struct esp_adapter *adapter;
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ u8 link_state;
+ u8 mac_address[6];
+ u8 if_type;
+ u8 if_num;
};
struct esp_skb_cb {
- struct esp_private *priv;
+ struct esp_private *priv;
};
#endif
void esp_process_new_packet_intr(struct esp_adapter *adapter);
struct esp_adapter * esp_get_adapter(void);
struct sk_buff * esp_alloc_skb(u32 len);
-int esp_send_packet(struct esp_adapter *adapter, u8 *buf, u32 size);
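+/* esp_send_packet() takes ownership of the skb; the transport either queues
+ * it for transmission or frees it on error. */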
+int esp_send_packet(struct esp_adapter *adapter, struct sk_buff *skb);
u8 esp_is_bt_supported_over_sdio(u32 cap);
void esp_tx_pause(void);
void esp_tx_resume(void);
hdr->offset = cpu_to_le16(sizeof(struct esp_payload_header));
hdr->hci_pkt_type = hci_skb_pkt_type(skb);
+
+ /* esp_send_packet() consumes the skb; update counters while it is still valid */
+ esp_hci_update_tx_counter(hdev, hdr->hci_pkt_type, skb->len);
+
- ret = esp_send_packet(adapter, skb->data, skb->len);
+ ret = esp_send_packet(adapter, skb);
if (ret) {
hdev->stat.err_tx++;
- esp_hci_update_tx_counter(hdev, hdr->hci_pkt_type, skb->len);
}
- dev_kfree_skb(skb);
-
return 0;
}
struct esp_if_ops {
int (*init)(struct esp_adapter *adapter);
struct sk_buff* (*read)(struct esp_adapter *adapter);
- int (*write)(struct esp_adapter *adapter, u8 *buf, u32 size);
+ int (*write)(struct esp_adapter *adapter, struct sk_buff *skb);
int (*deinit)(struct esp_adapter *adapter);
};
static int esp_serial_write(struct file *file, const char __user *user_buffer, size_t size, loff_t * offset)
{
struct esp_payload_header *hdr;
- char *buf;
+ u8 *tx_buf;
struct esp_serial_devs *dev;
+ struct sk_buff *tx_skb;
int ret;
size_t total_len;
dev = (struct esp_serial_devs *) file->private_data;
total_len = size + sizeof(struct esp_payload_header);
- buf = kmalloc(total_len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Error allocating buffer to send serial data\n");
+ tx_skb = esp_alloc_skb(total_len);
+ if (!tx_skb) {
+ printk (KERN_ERR "%s: SKB alloc failed\n", __func__);
return -ENOMEM;
}
- hdr = (struct esp_payload_header *) buf;
+ tx_buf = skb_put(tx_skb, total_len);
+
+ hdr = (struct esp_payload_header *) tx_buf;
memset (hdr, 0, sizeof(struct esp_payload_header));
hdr->len = cpu_to_le16(size);
hdr->offset = cpu_to_le16(sizeof(struct esp_payload_header));
- ret = copy_from_user(buf + hdr->offset, user_buffer, size);
- if (ret != 0) {
- kfree(buf);
+ ret = copy_from_user(tx_buf + sizeof(struct esp_payload_header), user_buffer, size);
+ if (ret) {
+ dev_kfree_skb(tx_skb);
printk(KERN_ERR "Error copying buffer to send serial data\n");
return -EFAULT;
}
- /* print_hex_dump(KERN_INFO, "esp_serial_tx: ", DUMP_PREFIX_ADDRESS, 16, 1, buf, total_len, 1 ); */
-
- ret = esp_send_packet(dev->priv, buf, total_len);
+ ret = esp_send_packet(dev->priv, tx_skb);
if (ret) {
printk (KERN_ERR "%s: Failed to transmit data\n", __func__);
}
- kfree(buf);
return size;
}
static void esp_tx_timeout(struct net_device *ndev);
static struct net_device_stats* esp_get_stats(struct net_device *ndev);
static void esp_set_rx_mode(struct net_device *ndev);
-int esp_send_packet(struct esp_adapter *adapter, u8 *buf, u32 size);
+static int process_tx_packet (struct sk_buff *skb);
+int esp_send_packet(struct esp_adapter *adapter, struct sk_buff *skb);
static const struct net_device_ops esp_netdev_ops = {
.ndo_open = esp_open,
/* print_hex_dump_bytes("Tx:", DUMP_PREFIX_NONE, skb->data, 8);*/
- skb_queue_tail(&adapter.tx_q, skb);
- atomic_inc(&adapter.tx_pending);
- queue_work(adapter.tx_workqueue, &adapter.tx_work);
-
- return 0;
+ return process_tx_packet(skb);
}
u8 esp_is_bt_supported_over_sdio(u32 cap)
queue_work(adapter->if_rx_workqueue, &adapter->if_rx_work);
}
-static void process_tx_packet (void)
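+/* Prepend the esp_payload_header to the skb (re-allocating headroom when
+ * needed) and hand it to the transport via esp_send_packet(). */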
+static int process_tx_packet (struct sk_buff *skb)
{
- struct sk_buff *skb;
struct esp_private *priv;
struct esp_skb_cb *cb;
struct esp_payload_header *payload_header;
u16 len = 0;
static u32 c = 0;
- while ((skb = skb_dequeue(&adapter.tx_q))) {
- c++;
- /* Get the priv */
- cb = (struct esp_skb_cb *) skb->cb;
- priv = cb->priv;
+ c++;
+ /* Get the priv */
+ cb = (struct esp_skb_cb *) skb->cb;
+ priv = cb->priv;
- if (!priv) {
- dev_kfree_skb_any(skb);
- atomic_dec(&adapter.tx_pending);
- continue;
- }
+ if (!priv) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
- len = skb->len;
+ len = skb->len;
- /* Create space for payload header */
- pad_len = sizeof(struct esp_payload_header);
+ /* Create space for payload header */
+ pad_len = sizeof(struct esp_payload_header);
- if (skb_headroom(skb) < pad_len) {
- /* insufficent headroom to add payload header */
- new_skb = skb_realloc_headroom(skb, pad_len);
-
- if(!new_skb) {
- printk(KERN_ERR "%s: Failed to allocate SKB", __func__);
- dev_kfree_skb(skb);
- atomic_dec(&adapter.tx_pending);
- continue;
- }
+ if (skb_headroom(skb) < pad_len) {
+ /* insufficient headroom to add payload header */
+ new_skb = skb_realloc_headroom(skb, pad_len);
+ if (!new_skb) {
+ printk(KERN_ERR "%s: Failed to allocate SKB\n", __func__);
dev_kfree_skb(skb);
-
- skb = new_skb;
+ return -ENOMEM;
}
- skb_push(skb, pad_len);
+ dev_kfree_skb(skb);
+
+ skb = new_skb;
+ }
+
+ skb_push(skb, pad_len);
- /* Set payload header */
- payload_header = (struct esp_payload_header *) skb->data;
- memset(payload_header, 0, pad_len);
+ /* Set payload header */
+ payload_header = (struct esp_payload_header *) skb->data;
+ memset(payload_header, 0, pad_len);
- payload_header->if_type = priv->if_type;
- payload_header->if_num = priv->if_num;
- payload_header->len = cpu_to_le16(skb->len - pad_len);
- payload_header->offset = cpu_to_le16(pad_len);
- payload_header->reserved1 = c % 255;
+ payload_header->if_type = priv->if_type;
+ payload_header->if_num = priv->if_num;
+ payload_header->len = cpu_to_le16(skb->len - pad_len);
+ payload_header->offset = cpu_to_le16(pad_len);
+ payload_header->reserved1 = c % 255;
-/* printk (KERN_ERR "H -> S: %d %d %d %d", len, payload_header->offset,*/
-/* payload_header->len, payload_header->reserved1);*/
+ /* printk (KERN_ERR "H -> S: %d %d %d %d", len, payload_header->offset,*/
+ /* payload_header->len, payload_header->reserved1);*/
- if (!stop_data) {
- ret = esp_send_packet(priv->adapter, skb->data, skb->len);
+ if (!stop_data) {
+ ret = esp_send_packet(priv->adapter, skb);
- if (ret) {
- priv->stats.tx_errors++;
- } else {
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += skb->len;
- }
+ if (ret) {
+ priv->stats.tx_errors++;
} else {
- priv->stats.tx_dropped++;
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
}
-
+ } else {
dev_kfree_skb_any(skb);
- atomic_dec(&adapter.tx_pending);
+ priv->stats.tx_dropped++;
}
+
+ return 0;
}
static void process_rx_packet(struct sk_buff *skb)
skb->dev = priv->ndev;
skb->protocol = eth_type_trans(skb, priv->ndev);
skb->ip_summed = CHECKSUM_NONE;
- /* print_hex_dump_bytes("Rx:", DUMP_PREFIX_NONE, skb->data, 8);*/
+ /* print_hex_dump_bytes("Rx:", DUMP_PREFIX_NONE, skb->data, 8); */
/* Forward skb to kernel */
netif_rx(skb);
return 0;
}
-int esp_send_packet(struct esp_adapter *adapter, u8 *buf, u32 size)
+int esp_send_packet(struct esp_adapter *adapter, struct sk_buff *skb)
{
if (!adapter || !adapter->if_ops || !adapter->if_ops->write)
return -EINVAL;
- return adapter->if_ops->write(adapter, buf, size);
+ return adapter->if_ops->write(adapter, skb);
}
static int insert_priv_to_adapter(struct esp_private *priv)
return ret;
}
-static void flush_ring_buffers(struct esp_adapter *adapter)
-{
- struct sk_buff *skb;
-
- printk (KERN_INFO "%s: Flush Pending SKBs: %d %d\n", __func__,
- atomic_read(&adapter->tx_pending),
- atomic_read(&adapter->rx_pending));
-
- while ((skb = skb_dequeue(&adapter->tx_q))) {
- dev_kfree_skb_any(skb);
- atomic_dec(&adapter->tx_pending);
- }
-
- while ((skb = skb_dequeue(&adapter->rx_q))) {
- dev_kfree_skb_any(skb);
- atomic_dec(&adapter->rx_pending);
- }
-}
-
static void esp_remove_network_interfaces(struct esp_adapter *adapter)
{
if (adapter->priv[0]->ndev) {
esp_remove_network_interfaces(adapter);
- flush_ring_buffers(adapter);
-
adapter->priv[0] = NULL;
adapter->priv[1] = NULL;
- atomic_set(&adapter->tx_pending, 0);
- atomic_set(&adapter->rx_pending, 0);
-
return 0;
}
-
-static void esp_tx_work (struct work_struct *work)
-{
- process_tx_packet();
-}
-
static void esp_if_rx_work (struct work_struct *work)
{
/* read inbound packet and forward it to network/serial interface */
return NULL;
}
- INIT_WORK(&adapter.tx_work, esp_tx_work);
-
- /* Prepare TX work */
- skb_queue_head_init(&adapter.tx_q);
- skb_queue_head_init(&adapter.rx_q);
-
- atomic_set(&adapter.tx_pending, 0);
- atomic_set(&adapter.rx_pending, 0);
-
return &adapter;
}
#endif
#include <linux/kthread.h>
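+/* TX flow control: pause the network queue once TX_MAX_PENDING_COUNT packets
+ * are pending towards the slave and resume it when the backlog drops below
+ * TX_RESUME_THRESHOLD (80% of the limit). */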
+#define TX_MAX_PENDING_COUNT 700
+#define TX_RESUME_THRESHOLD (TX_MAX_PENDING_COUNT - (TX_MAX_PENDING_COUNT/5))
+
#define CHECK_SDIO_RW_ERROR(ret) do { \
if (ret) \
printk(KERN_ERR "%s: CMD53 read/write error at %d\n", __func__, __LINE__); \
} while (0);
struct esp_sdio_context sdio_context;
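+/* tx_pending counts packets queued on sdio_context.tx_q but not yet written
+ * to the slave and drives flow control; queue_items mirrors the queue depth
+ * for the TX thread poll loop. */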
+static atomic_t tx_pending;
+static atomic_t queue_items;
#ifdef CONFIG_ENABLE_MONITOR_PROCESS
struct task_struct *monitor_thread;
#endif
+struct task_struct *tx_thread;
static int init_context(struct esp_sdio_context *context);
static struct sk_buff * read_packet(struct esp_adapter *adapter);
-static int write_packet(struct esp_adapter *adapter, u8 *buf, u32 size);
+static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
/*int deinit_context(struct esp_adapter *adapter);*/
static const struct sdio_device_id esp_devices[] = {
kthread_stop(monitor_thread);
#endif
+ if (tx_thread)
+ kthread_stop(tx_thread);
+
if (context) {
generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
msleep(100);
}
}
+ skb_queue_purge(&(sdio_context.tx_q));
memset(context, 0, sizeof(struct esp_sdio_context));
}
context->adapter = esp_get_adapter();
- if (!context->adapter)
+ if (unlikely(!context->adapter))
printk (KERN_ERR "%s: Failed to get adapter\n", __func__);
+ skb_queue_head_init(&(sdio_context.tx_q));
+ atomic_set(&queue_items, 0);
+
context->adapter->if_type = ESP_IF_TYPE_SDIO;
return ret;
return skb;
}
-static int write_packet(struct esp_adapter *adapter, u8 *buf, u32 size)
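+/* SDIO transport write: validate the skb, apply flow control and queue it on
+ * tx_q for the TX thread; this function takes ownership of the skb. */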
+static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
{
- u32 block_cnt = 0, buf_needed = 0;
- u32 buf_available = 0;
- int ret = 0;
- u8 *pos = NULL;
- u32 data_left, len_to_send, pad;
- struct esp_sdio_context *context;
+ u32 max_pkt_size = ESP_RX_BUFFER_SIZE - sizeof(struct esp_payload_header);
+
+ if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
+ printk(KERN_ERR "%s: Invalid args\n", __func__);
+ if (skb)
+ dev_kfree_skb(skb);
- if (!adapter || !adapter->if_context || !buf || !size) {
- printk (KERN_ERR "%s: Invalid args\n", __func__);
return -EINVAL;
}
+ if (skb->len > max_pkt_size) {
+ printk(KERN_ERR "%s: Drop pkt of len[%u] > max SDIO transport len[%u]\n",
+ __func__, skb->len, max_pkt_size);
+ dev_kfree_skb(skb);
+ return -EPERM;
+ }
+
+ if (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT) {
+ esp_tx_pause();
+ dev_kfree_skb(skb);
+ return -EBUSY;
+ }
+
+ atomic_inc(&tx_pending);
+
+ /* Enqueue SKB in tx_q and notify the TX thread poll loop */
+ skb_queue_tail(&(sdio_context.tx_q), skb);
+ atomic_inc(&queue_items);
+
+ return 0;
+}
+
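+/* Kernel thread that drains tx_q: for each skb it checks the buffer count
+ * advertised by the slave, pads the payload up to the SDIO block size and
+ * pushes it out over CMD53; the packet is dropped if the slave has no room
+ * or a write fails. */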
+static int tx_process(void *data)
+{
+ int ret = 0;
+ u32 block_cnt = 0;
+ u32 buf_needed = 0, buf_available = 0;
+ u8 *pos = NULL;
+ u32 data_left, len_to_send, pad;
+ struct sk_buff *tx_skb = NULL;
+ struct esp_adapter *adapter = (struct esp_adapter *) data;
+ struct esp_sdio_context *context = NULL;
+
context = adapter->if_context;
- sdio_claim_host(context->func);
+ while (!kthread_should_stop()) {
- buf_needed = (size + ESP_RX_BUFFER_SIZE - 1) / ESP_RX_BUFFER_SIZE;
+ if (context->state != ESP_CONTEXT_READY) {
+ msleep(10);
+ continue;
+ }
- ret = esp_slave_get_tx_buffer_num(context, &buf_available, LOCK_ALREADY_ACQUIRED);
+ if (atomic_read(&queue_items) <= 0) {
+ msleep(10);
+ continue;
+ }
-/* printk(KERN_ERR "%s: TX -> Available [%d], needed [%d]\n", __func__, buf_available, buf_needed);*/
+ tx_skb = skb_dequeue(&(context->tx_q));
+ if (!tx_skb) {
+ continue;
+ }
- if (buf_available < buf_needed) {
- printk(KERN_DEBUG "%s: Not enough buffers available: availabale [%d], needed [%d]\n", __func__,
- buf_available, buf_needed);
- sdio_release_host(context->func);
- return -ENOMEM;
- }
+ atomic_dec(&queue_items);
+ atomic_dec(&tx_pending);
- pos = buf;
- data_left = len_to_send = 0;
+ /* resume network tx queue if bearable load */
+ if (atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
+ esp_tx_resume();
+ }
- data_left = size;
- pad = ESP_BLOCK_SIZE - (data_left % ESP_BLOCK_SIZE);
- data_left += pad;
+ sdio_claim_host(context->func);
+ buf_needed = (tx_skb->len + ESP_RX_BUFFER_SIZE - 1) / ESP_RX_BUFFER_SIZE;
- do {
- block_cnt = data_left / ESP_BLOCK_SIZE;
- len_to_send = data_left;
- ret = esp_write_block(context, ESP_SLAVE_CMD53_END_ADDR - len_to_send,
- pos, (len_to_send + 3) & (~3), LOCK_ALREADY_ACQUIRED);
+ ret = esp_slave_get_tx_buffer_num(context, &buf_available, LOCK_ALREADY_ACQUIRED);
- if (ret) {
- printk (KERN_ERR "%s: Failed to send data\n", __func__);
+ if (unlikely(ret)) {
+ sdio_release_host(context->func);
+ dev_kfree_skb(tx_skb);
+ continue;
+ }
+
+ if (buf_available < buf_needed) {
+ printk(KERN_DEBUG "%s: Drop pkt: available bufs[%d] < needed bufs[%d]\n",
+ __func__, buf_available, buf_needed);
sdio_release_host(context->func);
- return ret;
+
+ /* drop the packet */
+ dev_kfree_skb(tx_skb);
+ continue;
}
-/* printk (KERN_ERR "--> %d %d %d\n", block_cnt, data_left, len_to_send);*/
- data_left -= len_to_send;
- pos += len_to_send;
- } while (data_left);
+ pos = tx_skb->data;
+ data_left = len_to_send = 0;
- context->tx_buffer_count += buf_needed;
- context->tx_buffer_count = context->tx_buffer_count % ESP_TX_BUFFER_MAX;
+ data_left = tx_skb->len;
+ pad = ESP_BLOCK_SIZE - (data_left % ESP_BLOCK_SIZE);
+ data_left += pad;
- sdio_release_host(context->func);
+ do {
+ block_cnt = data_left / ESP_BLOCK_SIZE;
+ len_to_send = data_left;
+ ret = esp_write_block(context, ESP_SLAVE_CMD53_END_ADDR - len_to_send,
+ pos, (len_to_send + 3) & (~3), LOCK_ALREADY_ACQUIRED);
+
+ if (ret) {
+ printk (KERN_ERR "%s: Failed to send data\n", __func__);
+ sdio_release_host(context->func);
+ break;
+ }
+
+ /* printk (KERN_ERR "--> %d %d %d\n", block_cnt, data_left, len_to_send); */
+
+ data_left -= len_to_send;
+ pos += len_to_send;
+ } while (data_left);
+
+ if (ret) {
+ /* drop the packet */
+ dev_kfree_skb(tx_skb);
+ continue;
+ }
+
+ context->tx_buffer_count += buf_needed;
+ context->tx_buffer_count = context->tx_buffer_count % ESP_TX_BUFFER_MAX;
+
+ sdio_release_host(context->func);
+ dev_kfree_skb(tx_skb);
+ }
+
return 0;
}
msleep(200);
+ atomic_set(&tx_pending, 0);
ret = init_context(context);
if (ret) {
deinit_sdio_func(func);
return ret;
}
+ tx_thread = kthread_run(tx_process, context->adapter, "esp32_TX");
+
+ if (IS_ERR(tx_thread)) {
+ printk(KERN_ERR "Failed to create esp32_sdio TX thread\n");
+ tx_thread = NULL;
+ }
+
#ifdef CONFIG_SUPPORT_ESP_SERIAL
ret = esp_serial_init((void *) context->adapter);
if (ret != 0) {
printk (KERN_ERR "Failed to create monitor thread\n");
#endif
-
msleep(200);
generate_slave_intr(context, BIT(ESP_OPEN_DATA_PATH));
return ret;
#include "esp.h"
/* Interrupt Status */
-#define ESP_SLAVE_BIT0_INT BIT(0)
-#define ESP_SLAVE_BIT1_INT BIT(1)
-#define ESP_SLAVE_BIT2_INT BIT(2)
-#define ESP_SLAVE_BIT3_INT BIT(3)
-#define ESP_SLAVE_BIT4_INT BIT(4)
-#define ESP_SLAVE_BIT5_INT BIT(5)
-#define ESP_SLAVE_BIT6_INT BIT(6)
-#define ESP_SLAVE_BIT7_INT BIT(7)
-#define ESP_SLAVE_RX_UNDERFLOW_INT BIT(16)
-#define ESP_SLAVE_TX_OVERFLOW_INT BIT(17)
-#define ESP_SLAVE_RX_NEW_PACKET_INT BIT(23)
-
-
-#define ESP_SLAVE_CMD53_END_ADDR 0x1F800
-#define ESP_SLAVE_LEN_MASK 0xFFFFF
-#define ESP_BLOCK_SIZE 512
-#define ESP_RX_BYTE_MAX 0x100000
-#define ESP_RX_BUFFER_SIZE 2048
-
-#define ESP_TX_BUFFER_MASK 0xFFF
-#define ESP_TX_BUFFER_MAX 0x1000
-#define ESP_MAX_BUF_CNT 10
-
-#define ESP_SLAVE_SLCHOST_BASE 0x3FF55000
-
-#define ESP_SLAVE_SCRATCH_REG_7 (ESP_SLAVE_SLCHOST_BASE + 0x8C)
+#define ESP_SLAVE_BIT0_INT BIT(0)
+#define ESP_SLAVE_BIT1_INT BIT(1)
+#define ESP_SLAVE_BIT2_INT BIT(2)
+#define ESP_SLAVE_BIT3_INT BIT(3)
+#define ESP_SLAVE_BIT4_INT BIT(4)
+#define ESP_SLAVE_BIT5_INT BIT(5)
+#define ESP_SLAVE_BIT6_INT BIT(6)
+#define ESP_SLAVE_BIT7_INT BIT(7)
+#define ESP_SLAVE_RX_UNDERFLOW_INT BIT(16)
+#define ESP_SLAVE_TX_OVERFLOW_INT BIT(17)
+#define ESP_SLAVE_RX_NEW_PACKET_INT BIT(23)
+
+
+#define ESP_SLAVE_CMD53_END_ADDR 0x1F800
+#define ESP_SLAVE_LEN_MASK 0xFFFFF
+#define ESP_BLOCK_SIZE 512
+#define ESP_RX_BYTE_MAX 0x100000
+#define ESP_RX_BUFFER_SIZE 2048
+
+#define ESP_TX_BUFFER_MASK 0xFFF
+#define ESP_TX_BUFFER_MAX 0x1000
+#define ESP_MAX_BUF_CNT 10
+
+#define ESP_SLAVE_SLCHOST_BASE 0x3FF55000
+
+#define ESP_SLAVE_SCRATCH_REG_7 (ESP_SLAVE_SLCHOST_BASE + 0x8C)
/* SLAVE registers */
/* Interrupt Registers */
-#define ESP_SLAVE_INT_RAW_REG (ESP_SLAVE_SLCHOST_BASE + 0x50)
-#define ESP_SLAVE_INT_ST_REG (ESP_SLAVE_SLCHOST_BASE + 0x58)
-#define ESP_SLAVE_INT_CLR_REG (ESP_SLAVE_SLCHOST_BASE + 0xD4)
+#define ESP_SLAVE_INT_RAW_REG (ESP_SLAVE_SLCHOST_BASE + 0x50)
+#define ESP_SLAVE_INT_ST_REG (ESP_SLAVE_SLCHOST_BASE + 0x58)
+#define ESP_SLAVE_INT_CLR_REG (ESP_SLAVE_SLCHOST_BASE + 0xD4)
/* Data path registers*/
-#define ESP_SLAVE_PACKET_LEN_REG (ESP_SLAVE_SLCHOST_BASE + 0x60)
-#define ESP_SLAVE_TOKEN_RDATA (ESP_SLAVE_SLCHOST_BASE + 0x44)
+#define ESP_SLAVE_PACKET_LEN_REG (ESP_SLAVE_SLCHOST_BASE + 0x60)
+#define ESP_SLAVE_TOKEN_RDATA (ESP_SLAVE_SLCHOST_BASE + 0x44)
/* Scratch registers*/
-#define ESP_SLAVE_SCRATCH_REG_0 (ESP_SLAVE_SLCHOST_BASE + 0x6C)
-#define ESP_SLAVE_SCRATCH_REG_1 (ESP_SLAVE_SLCHOST_BASE + 0x70)
-#define ESP_SLAVE_SCRATCH_REG_2 (ESP_SLAVE_SLCHOST_BASE + 0x74)
-#define ESP_SLAVE_SCRATCH_REG_3 (ESP_SLAVE_SLCHOST_BASE + 0x78)
-#define ESP_SLAVE_SCRATCH_REG_4 (ESP_SLAVE_SLCHOST_BASE + 0x7C)
-#define ESP_SLAVE_SCRATCH_REG_6 (ESP_SLAVE_SLCHOST_BASE + 0x88)
-#define ESP_SLAVE_SCRATCH_REG_8 (ESP_SLAVE_SLCHOST_BASE + 0x9C)
-#define ESP_SLAVE_SCRATCH_REG_9 (ESP_SLAVE_SLCHOST_BASE + 0xA0)
-#define ESP_SLAVE_SCRATCH_REG_10 (ESP_SLAVE_SLCHOST_BASE + 0xA4)
-#define ESP_SLAVE_SCRATCH_REG_11 (ESP_SLAVE_SLCHOST_BASE + 0xA8)
-#define ESP_SLAVE_SCRATCH_REG_12 (ESP_SLAVE_SLCHOST_BASE + 0xAC)
-#define ESP_SLAVE_SCRATCH_REG_13 (ESP_SLAVE_SLCHOST_BASE + 0xB0)
-#define ESP_SLAVE_SCRATCH_REG_14 (ESP_SLAVE_SLCHOST_BASE + 0xB4)
-#define ESP_SLAVE_SCRATCH_REG_15 (ESP_SLAVE_SLCHOST_BASE + 0xB8)
-
-#define ESP_ADDRESS_MASK 0x3FF
-
-#define ESP_VENDOR_ID 0x6666
-#define ESP_DEVICE_ID_1 0x2222
-#define ESP_DEVICE_ID_2 0x3333
+#define ESP_SLAVE_SCRATCH_REG_0 (ESP_SLAVE_SLCHOST_BASE + 0x6C)
+#define ESP_SLAVE_SCRATCH_REG_1 (ESP_SLAVE_SLCHOST_BASE + 0x70)
+#define ESP_SLAVE_SCRATCH_REG_2 (ESP_SLAVE_SLCHOST_BASE + 0x74)
+#define ESP_SLAVE_SCRATCH_REG_3 (ESP_SLAVE_SLCHOST_BASE + 0x78)
+#define ESP_SLAVE_SCRATCH_REG_4 (ESP_SLAVE_SLCHOST_BASE + 0x7C)
+#define ESP_SLAVE_SCRATCH_REG_6 (ESP_SLAVE_SLCHOST_BASE + 0x88)
+#define ESP_SLAVE_SCRATCH_REG_8 (ESP_SLAVE_SLCHOST_BASE + 0x9C)
+#define ESP_SLAVE_SCRATCH_REG_9 (ESP_SLAVE_SLCHOST_BASE + 0xA0)
+#define ESP_SLAVE_SCRATCH_REG_10 (ESP_SLAVE_SLCHOST_BASE + 0xA4)
+#define ESP_SLAVE_SCRATCH_REG_11 (ESP_SLAVE_SLCHOST_BASE + 0xA8)
+#define ESP_SLAVE_SCRATCH_REG_12 (ESP_SLAVE_SLCHOST_BASE + 0xAC)
+#define ESP_SLAVE_SCRATCH_REG_13 (ESP_SLAVE_SLCHOST_BASE + 0xB0)
+#define ESP_SLAVE_SCRATCH_REG_14 (ESP_SLAVE_SLCHOST_BASE + 0xB4)
+#define ESP_SLAVE_SCRATCH_REG_15 (ESP_SLAVE_SLCHOST_BASE + 0xB8)
+
+#define ESP_ADDRESS_MASK 0x3FF
+
+#define ESP_VENDOR_ID 0x6666
+#define ESP_DEVICE_ID_1 0x2222
+#define ESP_DEVICE_ID_2 0x3333
enum context_state {
};
struct esp_sdio_context {
- struct esp_adapter *adapter;
- struct sdio_func *func;
- enum context_state state;
- u32 rx_byte_count;
- u32 tx_buffer_count;
+ struct esp_adapter *adapter;
+ struct sdio_func *func;
+ enum context_state state;
+ struct sk_buff_head tx_q;
+ u32 rx_byte_count;
+ u32 tx_buffer_count;
};
#endif
#define ESP_PRIV_FIRMWARE_CHIP_ESP32S2 (0x2)
static struct sk_buff * read_packet(struct esp_adapter *adapter);
-static int write_packet(struct esp_adapter *adapter, u8 *buf, u32 size);
+static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
static void spi_exit(void);
static void adjust_spi_clock(u8 spi_clk_mhz);
return skb;
}
-static int write_packet(struct esp_adapter *adapter, u8 *buf, u32 size)
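+/* SPI transport write: validate the skb, length-check it against the SPI
+ * buffer size and queue it on spi_context.tx_q; this function takes
+ * ownership of the skb. */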
+static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
{
- struct esp_spi_context *context;
- struct sk_buff *skb;
- u8 *tx_buf = NULL;
+ u32 max_pkt_size = SPI_BUF_SIZE - sizeof(struct esp_payload_header);
- if (!adapter || !adapter->if_context || !buf || !size || (size > SPI_BUF_SIZE)) {
+ if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
printk (KERN_ERR "%s: Invalid args\n", __func__);
+ dev_kfree_skb(skb);
return -EINVAL;
}
+ if (skb->len > max_pkt_size) {
+ printk (KERN_ERR "%s: Drop pkt of len[%u] > max spi transport len[%u]\n",
+ __func__, skb->len, max_pkt_size);
+ dev_kfree_skb(skb);
+ return -EPERM;
+ }
+
if (!data_path) {
+ dev_kfree_skb(skb);
return -EPERM;
}
if (tx_pending >= TX_MAX_PENDING_COUNT) {
esp_tx_pause();
- return -EBUSY;
- }
-
- /* Adjust length to make it multiple of 4 bytes */
- size += 4 - (size & 3);
-
- context = adapter->if_context;
-
- skb = esp_alloc_skb(size);
-
- if (!skb)
- return -ENOMEM;
-
- tx_buf = skb_put(skb, size);
-
- if (!tx_buf) {
dev_kfree_skb(skb);
- return -ENOMEM;
+ return -EBUSY;
}
- /* TODO: This memecpy can be avoided if this function receives SKB as an argument */
- memcpy(tx_buf, buf, size);
-
/* Enqueue SKB in tx_q */
skb_queue_tail(&spi_context.tx_q, skb);