// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
#include "dp_mon.h"

enum ath12k_dp_desc_type {
        ATH12K_DP_TX_DESC,
        ATH12K_DP_RX_DESC,
};

static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
                                          struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_peer *peer;

        /* TODO: Any other peer specific DP cleanup */

        spin_lock_bh(&ab->base_lock);
        peer = ath12k_peer_find(ab, vdev_id, addr);
        if (!peer) {
                ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
                            addr, vdev_id);
                spin_unlock_bh(&ab->base_lock);
                return;
        }

        ath12k_dp_rx_peer_tid_cleanup(ar, peer);
        crypto_free_shash(peer->tfm_mmic);
        peer->dp_setup_done = false;
        spin_unlock_bh(&ab->base_lock);
}

int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_peer *peer;
        u32 reo_dest;
        int ret = 0, tid;

        /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
        reo_dest = ar->dp.mac_id + 1;
        ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
                                        WMI_PEER_SET_DEFAULT_ROUTING,
                                        DP_RX_HASH_ENABLE | (reo_dest << 1));

        if (ret) {
                ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
                            ret, addr, vdev_id);
                return ret;
        }

        for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
                ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
                                                  HAL_PN_TYPE_NONE);
                if (ret) {
                        ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
                                    tid, ret);
                        goto peer_clean;
                }
        }

        ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
        if (ret) {
                ath12k_warn(ab, "failed to setup rx defrag context\n");
                goto peer_clean;
        }

        /* TODO: Setup other peer specific resource used in data path */

        return 0;

peer_clean:
        spin_lock_bh(&ab->base_lock);

        peer = ath12k_peer_find(ab, vdev_id, addr);
        if (!peer) {
                ath12k_warn(ab, "failed to find the peer to del rx tid\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        for (; tid >= 0; tid--)
                ath12k_dp_rx_peer_tid_delete(ar, peer, tid);

        spin_unlock_bh(&ab->base_lock);

        return ret;
}

void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
{
        if (!ring->vaddr_unaligned)
                return;

        dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
                          ring->paddr_unaligned);

        ring->vaddr_unaligned = NULL;
}

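/* Each ext IRQ group services the set of rings described by a per-group
 * bitmask in hw_params. A ring's MSI vector is derived from the first group
 * whose mask contains that ring's bit, so the lookup below walks the groups
 * in order.
 */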
static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
        int ext_group_num;
        u8 mask = 1 << ring_num;

        for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
             ext_group_num++) {
                if (mask & grp_mask[ext_group_num])
                        return ext_group_num;
        }

        return -ENOENT;
}

static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
                                              enum hal_ring_type type, int ring_num)
{
        const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
        const u8 *grp_mask;
        int i;

        switch (type) {
        case HAL_WBM2SW_RELEASE:
                if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
                        grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
                        ring_num = 0;
                } else {
                        map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
                        for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
                                if (ring_num == map[i].wbm_ring_num) {
                                        ring_num = i;
                                        break;
                                }
                        }

                        grp_mask = &ab->hw_params->ring_mask->tx[0];
                }
                break;
        case HAL_REO_EXCEPTION:
                grp_mask = &ab->hw_params->ring_mask->rx_err[0];
                break;
        case HAL_REO_DST:
                grp_mask = &ab->hw_params->ring_mask->rx[0];
                break;
        case HAL_REO_STATUS:
                grp_mask = &ab->hw_params->ring_mask->reo_status[0];
                break;
        case HAL_RXDMA_MONITOR_STATUS:
        case HAL_RXDMA_MONITOR_DST:
                grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
                break;
        case HAL_TX_MONITOR_DST:
                grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
                break;
        case HAL_RXDMA_BUF:
                grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
                break;
        case HAL_RXDMA_MONITOR_BUF:
        case HAL_TCL_DATA:
        case HAL_TCL_CMD:
        case HAL_REO_CMD:
        case HAL_SW2WBM_RELEASE:
        case HAL_WBM_IDLE_LINK:
        case HAL_TCL_STATUS:
        case HAL_REO_REINJECT:
        case HAL_CE_SRC:
        case HAL_CE_DST:
        case HAL_CE_DST_STATUS:
        default:
                return -ENOENT;
        }

        return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
                                     struct hal_srng_params *ring_params,
                                     enum hal_ring_type type, int ring_num)
{
        int msi_group_number, msi_data_count;
        u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
        int ret;

        ret = ath12k_hif_get_user_msi_vector(ab, "DP",
                                             &msi_data_count, &msi_data_start,
                                             &msi_irq_start);
        if (ret)
                return;

        msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
                                                              ring_num);
        if (msi_group_number < 0) {
                ath12k_dbg(ab, ATH12K_DBG_PCI,
                           "ring not part of an ext_group; ring_type: %d, ring_num %d",
                           type, ring_num);
                ring_params->msi_addr = 0;
                ring_params->msi_data = 0;
                return;
        }

        if (msi_group_number > msi_data_count) {
                ath12k_dbg(ab, ATH12K_DBG_PCI,
                           "multiple msi_groups share one msi, msi_group_num %d",
                           msi_group_number);
        }

        ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);

        ring_params->msi_addr = addr_lo;
        ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
        ring_params->msi_data = (msi_group_number % msi_data_count)
                + msi_data_start;
        ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

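/* ath12k_dp_srng_setup() allocates the DMA memory for a ring, derives its
 * interrupt parameters and registers it with HAL. Callers pair it with
 * ath12k_dp_srng_cleanup(), e.g. (sketch of the pattern used below):
 *
 *        ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 *                                   0, 0, DP_REO_CMD_RING_SIZE);
 *        if (ret)
 *                goto err;
 *        ...
 *        ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 */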
int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
                         enum hal_ring_type type, int ring_num,
                         int mac_id, int num_entries)
{
        struct hal_srng_params params = { 0 };
        int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
        int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
        int ret;

        if (max_entries < 0 || entry_sz < 0)
                return -EINVAL;

        if (num_entries > max_entries)
                num_entries = max_entries;

        ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
        ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
                                                   &ring->paddr_unaligned,
                                                   GFP_KERNEL);
        if (!ring->vaddr_unaligned)
                return -ENOMEM;

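        /* The ring was over-allocated by HAL_RING_BASE_ALIGN - 1 bytes above
         * so the base address can be rounded up here; the DMA address is
         * offset by the same amount to keep both views of the ring in sync.
         */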
        ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
        ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
                      (unsigned long)ring->vaddr_unaligned);

        params.ring_base_vaddr = ring->vaddr;
        params.ring_base_paddr = ring->paddr;
        params.num_entries = num_entries;
        ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

        switch (type) {
        case HAL_REO_DST:
                params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_RX;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
                break;
        case HAL_RXDMA_BUF:
        case HAL_RXDMA_MONITOR_BUF:
        case HAL_RXDMA_MONITOR_STATUS:
                params.low_threshold = num_entries >> 3;
                params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
                params.intr_batch_cntr_thres_entries = 0;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
                break;
        case HAL_TX_MONITOR_DST:
                params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
                params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
                params.intr_batch_cntr_thres_entries = 0;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
                break;
        case HAL_WBM2SW_RELEASE:
                if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
                        params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_TX;
                        params.intr_timer_thres_us =
                                        HAL_SRNG_INT_TIMER_THRESHOLD_TX;
                        break;
                }
                /* fall through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
                fallthrough;
        case HAL_REO_EXCEPTION:
        case HAL_REO_REINJECT:
        case HAL_REO_CMD:
        case HAL_REO_STATUS:
        case HAL_TCL_DATA:
        case HAL_TCL_CMD:
        case HAL_TCL_STATUS:
        case HAL_WBM_IDLE_LINK:
        case HAL_SW2WBM_RELEASE:
        case HAL_RXDMA_DST:
        case HAL_RXDMA_MONITOR_DST:
        case HAL_RXDMA_MONITOR_DESC:
                params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
                break;
        case HAL_RXDMA_DIR_BUF:
                break;
        default:
                ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
                return -EINVAL;
        }

        ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
        if (ret < 0) {
                ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
                            ret, ring_num);
                return ret;
        }

        ring->ring_id = ret;

        return 0;
}

static
u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
                                      struct ath12k_link_vif *arvif)
{
        u32 bank_config = 0;
        struct ath12k_vif *ahvif = arvif->ahvif;

        /* Only valid for raw frames with HW crypto enabled.
         * With SW crypto, mac80211 sets key per packet.
         */
        if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
            test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
                bank_config |=
                        u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
                                        HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);

        bank_config |= u32_encode_bits(ahvif->tx_encap_type,
                                        HAL_TX_BANK_CONFIG_ENCAP_TYPE);
        bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
                        u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
                        u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);

        /* only valid if idx_lookup_override is not set in tcl_data_cmd */
        bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);

        bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
                                        HAL_TX_BANK_CONFIG_ADDRX_EN) |
                        u32_encode_bits(!!(arvif->hal_addr_search_flags &
                                        HAL_TX_ADDRY_EN),
                                        HAL_TX_BANK_CONFIG_ADDRY_EN);

        bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
                                        HAL_TX_BANK_CONFIG_MESH_EN) |
                        u32_encode_bits(arvif->vdev_id_check_en,
                                        HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);

        bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);

        return bank_config;
}

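/* TX bank profiles cache a vdev's TCL configuration in a HW bank register so
 * that per-packet TCL descriptors only need to carry a bank id. Profiles are
 * refcounted and shared: a vdev whose bank_config matches an already
 * configured bank reuses it instead of programming a new register.
 */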
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
                                         struct ath12k_link_vif *arvif,
                                         struct ath12k_dp *dp)
{
        int bank_id = DP_INVALID_BANK_ID;
        int i;
        u32 bank_config;
        bool configure_register = false;

        /* convert vdev params into hal_tx_bank_config */
        bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);

        spin_lock_bh(&dp->tx_bank_lock);
        /* TODO: implement using idr kernel framework */
        for (i = 0; i < dp->num_bank_profiles; i++) {
                if (dp->bank_profiles[i].is_configured &&
                    (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
                        bank_id = i;
                        goto inc_ref_and_return;
                }
                if (!dp->bank_profiles[i].is_configured ||
                    !dp->bank_profiles[i].num_users) {
                        bank_id = i;
                        goto configure_and_return;
                }
        }

        if (bank_id == DP_INVALID_BANK_ID) {
                spin_unlock_bh(&dp->tx_bank_lock);
                ath12k_err(ab, "unable to find TX bank!\n");
                return bank_id;
        }

configure_and_return:
        dp->bank_profiles[bank_id].is_configured = true;
        dp->bank_profiles[bank_id].bank_config = bank_config;
        configure_register = true;
inc_ref_and_return:
        dp->bank_profiles[bank_id].num_users++;
        spin_unlock_bh(&dp->tx_bank_lock);

        if (configure_register)
                ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);

        ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
                   bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
                   dp->bank_profiles[bank_id].num_users);

        return bank_id;
}

void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
{
        spin_lock_bh(&dp->tx_bank_lock);
        dp->bank_profiles[bank_id].num_users--;
        spin_unlock_bh(&dp->tx_bank_lock);
}

static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;

        kfree(dp->bank_profiles);
        dp->bank_profiles = NULL;
}

static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
        int i;

        dp->num_bank_profiles = num_tcl_banks;
        dp->bank_profiles = kmalloc_array(num_tcl_banks,
                                          sizeof(struct ath12k_dp_tx_bank_profile),
                                          GFP_KERNEL);
        if (!dp->bank_profiles)
                return -ENOMEM;

        spin_lock_init(&dp->tx_bank_lock);

        for (i = 0; i < num_tcl_banks; i++) {
                dp->bank_profiles[i].is_configured = false;
                dp->bank_profiles[i].num_users = 0;
        }

        return 0;
}

static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        int i;

        ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
        ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
        ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
        ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
        ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
        for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
                ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
                ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
        }
        ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}

static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
        struct hal_srng *srng;
        int i, ret, tx_comp_ring_num;
        u32 ring_hash_map;

        ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
                                   HAL_SW2WBM_RELEASE, 0, 0,
                                   DP_WBM_RELEASE_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up sw2wbm_release ring :%d\n",
                            ret);
                goto err;
        }

        for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
                map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
                tx_comp_ring_num = map[i].wbm_ring_num;

                ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
                                           HAL_TCL_DATA, i, 0,
                                           DP_TCL_DATA_RING_SIZE);
                if (ret) {
                        ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
                                    i, ret);
                        goto err;
                }

                ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
                                           HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
                                           DP_TX_COMP_RING_SIZE);
                if (ret) {
                        ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
                                    tx_comp_ring_num, ret);
                        goto err;
                }
        }

        ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
                                   0, 0, DP_REO_REINJECT_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
                                   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
                                   DP_RX_RELEASE_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
                goto err;
        }

        ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
                                   0, 0, DP_REO_EXCEPTION_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
                                   0, 0, DP_REO_CMD_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
                goto err;
        }

        srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
        ath12k_hal_reo_init_cmd_ring(ab, srng);

        ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
                                   0, 0, DP_REO_STATUS_RING_SIZE);
        if (ret) {
                ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
                goto err;
        }

        /* When hash based routing of rx packets is enabled, 32 entries that
         * map the hash values to the rings will be configured. Each hash
         * entry uses four bits to map to a particular ring. The ring mapping
         * is 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW, 7:SW5,
         * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
         */
        ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
                        HAL_HASH_ROUTING_RING_SW2 << 4 |
                        HAL_HASH_ROUTING_RING_SW3 << 8 |
                        HAL_HASH_ROUTING_RING_SW4 << 12 |
                        HAL_HASH_ROUTING_RING_SW1 << 16 |
                        HAL_HASH_ROUTING_RING_SW2 << 20 |
                        HAL_HASH_ROUTING_RING_SW3 << 24 |
                        HAL_HASH_ROUTING_RING_SW4 << 28;
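        /* With the mapping above, consecutive hash values cycle through
         * SW1..SW4, spreading rx traffic across the four REO destination
         * rings; e.g. assuming SW1..SW4 encode as 1..4, the register value
         * works out to 0x43214321.
         */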

        ath12k_hal_reo_hw_setup(ab, ring_hash_map);

        return 0;

err:
        ath12k_dp_srng_common_cleanup(ab);

        return ret;
}

static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
        int i;

        for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
                if (!slist[i].vaddr)
                        continue;

                dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                  slist[i].vaddr, slist[i].paddr);
                slist[i].vaddr = NULL;
        }
}

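/* When the idle link descriptor list is too large for the WBM idle link ring
 * to be populated directly, the descriptors are instead published through a
 * chain of DMA-coherent scatter buffers, each holding n_entries_per_buf link
 * descriptor pointers, and the chain is handed to HW via
 * ath12k_hal_setup_link_idle_list().
 */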
static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
                                                  int size,
                                                  u32 n_link_desc_bank,
                                                  u32 n_link_desc,
                                                  u32 last_bank_sz)
{
        struct ath12k_dp *dp = &ab->dp;
        struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
        u32 n_entries_per_buf;
        int num_scatter_buf, scatter_idx;
        struct hal_wbm_link_desc *scatter_buf;
        int align_bytes, n_entries;
        dma_addr_t paddr;
        int rem_entries;
        int i;
        int ret = 0;
        u32 end_offset, cookie;
        enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;

        n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
                ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
        num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

        if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
                return -EINVAL;

        for (i = 0; i < num_scatter_buf; i++) {
                slist[i].vaddr = dma_alloc_coherent(ab->dev,
                                                    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                                    &slist[i].paddr, GFP_KERNEL);
                if (!slist[i].vaddr) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        scatter_idx = 0;
        scatter_buf = slist[scatter_idx].vaddr;
        rem_entries = n_entries_per_buf;

        for (i = 0; i < n_link_desc_bank; i++) {
                align_bytes = link_desc_banks[i].vaddr -
                              link_desc_banks[i].vaddr_unaligned;
                n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
                             HAL_LINK_DESC_SIZE;
                paddr = link_desc_banks[i].paddr;
                while (n_entries) {
                        cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
                        ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
                                                      paddr, rbm);
                        n_entries--;
                        paddr += HAL_LINK_DESC_SIZE;
                        if (rem_entries) {
                                rem_entries--;
                                scatter_buf++;
                                continue;
                        }

                        rem_entries = n_entries_per_buf;
                        scatter_idx++;
                        scatter_buf = slist[scatter_idx].vaddr;
                }
        }

        end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
                     sizeof(struct hal_wbm_link_desc);
        ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
                                        n_link_desc, end_offset);

        return 0;

err:
        ath12k_dp_scatter_idle_link_desc_cleanup(ab);

        return ret;
}

static void
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks)
{
        int i;

        for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
                if (link_desc_banks[i].vaddr_unaligned) {
                        dma_free_coherent(ab->dev,
                                          link_desc_banks[i].size,
                                          link_desc_banks[i].vaddr_unaligned,
                                          link_desc_banks[i].paddr_unaligned);
                        link_desc_banks[i].vaddr_unaligned = NULL;
                }
        }
}

static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
                                          struct dp_link_desc_bank *desc_bank,
                                          int n_link_desc_bank,
                                          int last_bank_sz)
{
        struct ath12k_dp *dp = &ab->dp;
        int i;
        int ret = 0;
        int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

        for (i = 0; i < n_link_desc_bank; i++) {
                if (i == (n_link_desc_bank - 1) && last_bank_sz)
                        desc_sz = last_bank_sz;

                desc_bank[i].vaddr_unaligned =
                                        dma_alloc_coherent(ab->dev, desc_sz,
                                                           &desc_bank[i].paddr_unaligned,
                                                           GFP_KERNEL);
                if (!desc_bank[i].vaddr_unaligned) {
                        ret = -ENOMEM;
                        goto err;
                }

                desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
                                               HAL_LINK_DESC_ALIGN);
                desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
                                     ((unsigned long)desc_bank[i].vaddr -
                                      (unsigned long)desc_bank[i].vaddr_unaligned);
                desc_bank[i].size = desc_sz;
        }

        return 0;

err:
        ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

        return ret;
}

void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
                                 struct dp_link_desc_bank *desc_bank,
                                 u32 ring_type, struct dp_srng *ring)
{
        ath12k_dp_link_desc_bank_free(ab, desc_bank);

        if (ring_type != HAL_RXDMA_MONITOR_DESC) {
                ath12k_dp_srng_cleanup(ab, ring);
                ath12k_dp_scatter_idle_link_desc_cleanup(ab);
        }
}

static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
{
        struct ath12k_dp *dp = &ab->dp;
        u32 n_mpdu_link_desc, n_mpdu_queue_desc;
        u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
        int ret = 0;

        n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
                           HAL_NUM_MPDUS_PER_LINK_DESC;

        n_mpdu_queue_desc = n_mpdu_link_desc /
                            HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

        n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
                               DP_AVG_MSDUS_PER_FLOW) /
                              HAL_NUM_TX_MSDUS_PER_LINK_DESC;

        n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
                               DP_AVG_MSDUS_PER_MPDU) /
                              HAL_NUM_RX_MSDUS_PER_LINK_DESC;

        *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
                      n_tx_msdu_link_desc + n_rx_msdu_link_desc;

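        /* If the total is not already a power of two, round it up to the
         * next one: fls() returns the position of the highest set bit, so
         * for a non-power-of-two value x, 1 << fls(x) is the next power of
         * two above x.
         */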
        if (*n_link_desc & (*n_link_desc - 1))
                *n_link_desc = 1 << fls(*n_link_desc);

        ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
                                   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
        if (ret) {
                ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
                return ret;
        }
        return ret;
}

int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks,
                              u32 ring_type, struct hal_srng *srng,
                              u32 n_link_desc)
{
        u32 tot_mem_sz;
        u32 n_link_desc_bank, last_bank_sz;
        u32 entry_sz, align_bytes, n_entries;
        struct hal_wbm_link_desc *desc;
        u32 paddr;
        int i, ret;
        u32 cookie;
        enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;

        tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
        tot_mem_sz += HAL_LINK_DESC_ALIGN;

        if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
                n_link_desc_bank = 1;
                last_bank_sz = tot_mem_sz;
        } else {
                n_link_desc_bank = tot_mem_sz /
                                   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                                    HAL_LINK_DESC_ALIGN);
                last_bank_sz = tot_mem_sz %
                               (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                                HAL_LINK_DESC_ALIGN);

                if (last_bank_sz)
                        n_link_desc_bank += 1;
        }

        if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
                return -EINVAL;

        ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
                                             n_link_desc_bank, last_bank_sz);
        if (ret)
                return ret;

        /* Setup link desc idle list for HW internal usage */
        entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
        tot_mem_sz = entry_sz * n_link_desc;

        /* Setup scatter desc list when the total memory requirement is more */
        if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
            ring_type != HAL_RXDMA_MONITOR_DESC) {
                ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
                                                             n_link_desc_bank,
                                                             n_link_desc,
                                                             last_bank_sz);
                if (ret) {
                        ath12k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
                                    ret);
                        goto fail_desc_bank_free;
                }

                return 0;
        }

        spin_lock_bh(&srng->lock);

        ath12k_hal_srng_access_begin(ab, srng);

        for (i = 0; i < n_link_desc_bank; i++) {
                align_bytes = link_desc_banks[i].vaddr -
                              link_desc_banks[i].vaddr_unaligned;
                n_entries = (link_desc_banks[i].size - align_bytes) /
                            HAL_LINK_DESC_SIZE;
                paddr = link_desc_banks[i].paddr;
                while (n_entries &&
                       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
                        cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
                        ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
                        n_entries--;
                        paddr += HAL_LINK_DESC_SIZE;
                }
        }

        ath12k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return 0;

fail_desc_bank_free:
        ath12k_dp_link_desc_bank_free(ab, link_desc_banks);

        return ret;
}

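/* NAPI poll for a DP ext IRQ group: each handler below consumes from the
 * rings mapped to this group and charges its work against the shared NAPI
 * budget, bailing out early once the budget is exhausted.
 */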
int ath12k_dp_service_srng(struct ath12k_base *ab,
                           struct ath12k_ext_irq_grp *irq_grp,
                           int budget)
{
        struct napi_struct *napi = &irq_grp->napi;
        int grp_id = irq_grp->grp_id;
        int work_done = 0;
        int i = 0, j;
        int tot_work_done = 0;
        enum dp_monitor_mode monitor_mode;
        u8 ring_mask;

        if (ab->hw_params->ring_mask->tx[grp_id]) {
                i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
                ath12k_dp_tx_completion_handler(ab, i);
        }

        if (ab->hw_params->ring_mask->rx_err[grp_id]) {
                work_done = ath12k_dp_rx_process_err(ab, napi, budget);
                budget -= work_done;
                tot_work_done += work_done;
                if (budget <= 0)
                        goto done;
        }

        if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
                work_done = ath12k_dp_rx_process_wbm_err(ab,
                                                         napi,
                                                         budget);
                budget -= work_done;
                tot_work_done += work_done;

                if (budget <= 0)
                        goto done;
        }

        if (ab->hw_params->ring_mask->rx[grp_id]) {
                i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
                work_done = ath12k_dp_rx_process(ab, i, napi,
                                                 budget);
                budget -= work_done;
                tot_work_done += work_done;
                if (budget <= 0)
                        goto done;
        }

        if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
                monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
                ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
                for (i = 0; i < ab->num_radios; i++) {
                        for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
                                int id = i * ab->hw_params->num_rxdma_per_pdev + j;

                                if (ring_mask & BIT(id)) {
                                        work_done =
                                        ath12k_dp_mon_process_ring(ab, id, napi, budget,
                                                                   monitor_mode);
                                        budget -= work_done;
                                        tot_work_done += work_done;

                                        if (budget <= 0)
                                                goto done;
                                }
                        }
                }
        }

        if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
                monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
                ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
                for (i = 0; i < ab->num_radios; i++) {
                        for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
                                int id = i * ab->hw_params->num_rxdma_per_pdev + j;

                                if (ring_mask & BIT(id)) {
                                        work_done =
                                        ath12k_dp_mon_process_ring(ab, id, napi, budget,
                                                                   monitor_mode);
                                        budget -= work_done;
                                        tot_work_done += work_done;

                                        if (budget <= 0)
                                                goto done;
                                }
                        }
                }
        }

        if (ab->hw_params->ring_mask->reo_status[grp_id])
                ath12k_dp_rx_process_reo_status(ab);

        if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
                struct ath12k_dp *dp = &ab->dp;
                struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
                LIST_HEAD(list);

                ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
        }

        /* TODO: Implement handler for other interrupts */

done:
        return tot_work_done;
}

void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
        int i;

        del_timer_sync(&ab->mon_reap_timer);

        for (i = 0; i < ab->num_radios; i++)
                ath12k_dp_rx_pdev_free(ab, i);
}

void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
{
        struct ath12k *ar;
        struct ath12k_pdev_dp *dp;
        int i;

        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                dp = &ar->dp;
                dp->mac_id = i;
                atomic_set(&dp->num_tx_pending, 0);
                init_waitqueue_head(&dp->tx_empty_waitq);

                /* TODO: Add any RXDMA setup required per pdev */
        }
}

bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
{
        if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
            ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
            ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
            ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
                return true;
        }
        return false;
}

void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
{
        if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
                /* RX TLVS compaction is supported, hence change the hal_rx_ops
                 * to compact hal_rx_ops.
                 */
                ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
        }
        ab->hal.hal_desc_sz =
                ab->hal_rx_ops->rx_desc_get_desc_size();
}

static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
        struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
        int i;

        for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
                ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
                                           ATH12K_DP_RX_MONITOR_MODE);

        mod_timer(&ab->mon_reap_timer, jiffies +
                  msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
}

static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
{
        if (ab->hw_params->rxdma1_enable)
                return;

        timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
}

int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
        struct ath12k *ar;
        int ret;
        int i;

        ret = ath12k_dp_rx_htt_setup(ab);
        if (ret)
                goto out;

        ath12k_dp_mon_reap_timer_init(ab);

        /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                ret = ath12k_dp_rx_pdev_alloc(ab, i);
                if (ret) {
                        ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
                                    i);
                        goto err;
                }
                ret = ath12k_dp_rx_pdev_mon_attach(ar);
                if (ret) {
                        ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
                        goto err;
                }
        }

        return 0;
err:
        ath12k_dp_pdev_free(ab);
out:
        return ret;
}

int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
        struct ath12k_htc_svc_conn_req conn_req = {0};
        struct ath12k_htc_svc_conn_resp conn_resp = {0};
        int status;

        conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
        conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;

        /* connect to control service */
        conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;

        status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
                                            &conn_resp);

        if (status)
                return status;

        dp->eid = conn_resp.eid;

        return 0;
}

static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
        switch (arvif->ahvif->vdev_type) {
        case WMI_VDEV_TYPE_STA:
                /* TODO: Verify the search type and flags since ast hash
                 * is not part of peer mapv3
                 */
                arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
                arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
                break;
        case WMI_VDEV_TYPE_AP:
        case WMI_VDEV_TYPE_IBSS:
                arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
                arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
                break;
        case WMI_VDEV_TYPE_MONITOR:
        default:
                return;
        }
}

void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
        struct ath12k_base *ab = ar->ab;

        arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
                               u32_encode_bits(arvif->vdev_id,
                                               HTT_TCL_META_DATA_VDEV_ID) |
                               u32_encode_bits(ar->pdev->pdev_id,
                                               HTT_TCL_META_DATA_PDEV_ID);

        /* set HTT extension valid bit to 0 by default */
        arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

        ath12k_dp_update_vdev_search(arvif);
        arvif->vdev_id_check_en = true;
        arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);

        /* TODO: error path for bank id failure */
        if (arvif->bank_id == DP_INVALID_BANK_ID) {
                ath12k_err(ar->ab, "Failed to initialize DP TX Banks\n");
                return;
        }
}

static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
        struct ath12k_rx_desc_info *desc_info;
        struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
        struct ath12k_dp *dp = &ab->dp;
        struct ath12k_skb_cb *skb_cb;
        struct sk_buff *skb;
        struct ath12k *ar;
        int i, j;
        u32 pool_id, tx_spt_page;

        if (!dp->spt_info)
                return;

        /* RX Descriptor cleanup */
        spin_lock_bh(&dp->rx_desc_lock);

        for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
                desc_info = dp->rxbaddr[i];

                for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
                        if (!desc_info[j].in_use) {
                                list_del(&desc_info[j].list);
                                continue;
                        }

                        skb = desc_info[j].skb;
                        if (!skb)
                                continue;

                        dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
                                         skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }

        for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
                if (!dp->rxbaddr[i])
                        continue;

                kfree(dp->rxbaddr[i]);
                dp->rxbaddr[i] = NULL;
        }

        spin_unlock_bh(&dp->rx_desc_lock);

        /* TX Descriptor cleanup */
        for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
                spin_lock_bh(&dp->tx_desc_lock[i]);

                list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
                                         list) {
                        list_del(&tx_desc_info->list);
                        skb = tx_desc_info->skb;

                        if (!skb)
                                continue;

                        /* if we are unregistering, hw would've been destroyed and
                         * ar is no longer valid.
                         */
                        if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
                                skb_cb = ATH12K_SKB_CB(skb);
                                ar = skb_cb->ar;

                                if (atomic_dec_and_test(&ar->dp.num_tx_pending))
                                        wake_up(&ar->dp.tx_empty_waitq);
                        }

                        dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
                                         skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                }

                spin_unlock_bh(&dp->tx_desc_lock[i]);
        }

        for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
                spin_lock_bh(&dp->tx_desc_lock[pool_id]);

                for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
                        tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
                        if (!dp->txbaddr[tx_spt_page])
                                continue;

                        kfree(dp->txbaddr[tx_spt_page]);
                        dp->txbaddr[tx_spt_page] = NULL;
                }

                spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
        }

        /* unmap SPT pages */
        for (i = 0; i < dp->num_spt_pages; i++) {
                if (!dp->spt_info[i].vaddr)
                        continue;

                dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
                                  dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
                dp->spt_info[i].vaddr = NULL;
        }

        kfree(dp->spt_info);
        dp->spt_info = NULL;
}

static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;

        if (!ab->hw_params->reoq_lut_support)
                return;

        if (!dp->reoq_lut.vaddr)
                return;

        dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
                          dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
        dp->reoq_lut.vaddr = NULL;

        ath12k_hif_write32(ab,
                           HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
}

void ath12k_dp_free(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        int i;

        ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
                                    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

        ath12k_dp_cc_cleanup(ab);
        ath12k_dp_reoq_lut_cleanup(ab);
        ath12k_dp_deinit_bank_profiles(ab);
        ath12k_dp_srng_common_cleanup(ab);

        ath12k_dp_rx_reo_cmd_list_cleanup(ab);

        for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
                kfree(dp->tx_ring[i].tx_status);
                dp->tx_ring[i].tx_status = NULL;
        }

        ath12k_dp_rx_free(ab);
        /* Deinit any SOC level resource */
}

void ath12k_dp_cc_config(struct ath12k_base *ab)
{
        u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
        u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
        u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
        u32 val = 0;

        ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);

        val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
                               HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
                u32_encode_bits(ATH12K_CC_PPT_MSB,
                                HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
                u32_encode_bits(ATH12K_CC_SPT_MSB,
                                HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
                u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
                u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
                u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);

        ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);

        /* Enable HW CC for WBM */
        ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);

        val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
                              HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
                u32_encode_bits(ATH12K_CC_PPT_MSB,
                                HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
                u32_encode_bits(ATH12K_CC_SPT_MSB,
                                HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
                u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);

        ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);

        /* Enable conversion complete indication */
        val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
        val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
                u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
                u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);

        ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);

        /* Enable Cookie conversion for WBM2SW Rings */
        val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
        val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
               ab->hw_params->hal_params->wbm2sw_cc_enable;

        ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
}

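/* A cookie packs the primary page table (PPT) index into the high bits and
 * the secondary page table (SPT) index into the low bits; e.g. assuming a
 * 9-bit SPT field (ATH12K_CC_PPT_SHIFT of 9), ppt_idx 2 and spt_idx 5 yield
 * cookie 0x405.
 */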
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
{
        return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}

static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
                                                   u16 ppt_idx, u16 spt_idx)
{
        struct ath12k_dp *dp = &ab->dp;

        return dp->spt_info[ppt_idx].vaddr + spt_idx;
}

struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
                                                  u32 cookie)
{
        struct ath12k_dp *dp = &ab->dp;
        struct ath12k_rx_desc_info **desc_addr_ptr;
        u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

        ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
        spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

        start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
        end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;

        if (ppt_idx < start_ppt_idx ||
            ppt_idx >= end_ppt_idx ||
            spt_idx >= ATH12K_MAX_SPT_ENTRIES)
1377                 return NULL;
1378
1379         ppt_idx = ppt_idx - dp->rx_ppt_base;
1380         desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1381
1382         return *desc_addr_ptr;
1383 }
1384
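/* TX cookie -> descriptor lookup: same scheme as the RX variant, except
 * the TX window starts at ATH12K_TX_SPT_PAGE_OFFSET with
 * ATH12K_TX_SPT_PAGES_PER_POOL pages per hardware queue and needs no
 * per-device rebase.
 */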
1385 struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
1386                                                   u32 cookie)
1387 {
1388         struct ath12k_tx_desc_info **desc_addr_ptr;
1389         u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
1390
1391         ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
1392         spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
1393
1394         start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
1395         end_ppt_idx = start_ppt_idx +
1396                       (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
1397
1398         if (ppt_idx < start_ppt_idx ||
1399             ppt_idx >= end_ppt_idx ||
1400             spt_idx >= ATH12K_MAX_SPT_ENTRIES)
1401                 return NULL;
1402
1403         desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1404
1405         return *desc_addr_ptr;
1406 }
1407
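/* Build the software side of the cookie conversion tables: allocate one
 * array of descriptors per SPT page (the RX window first, then the
 * per-pool TX windows), pre-compute each descriptor's cookie, chain it
 * onto the matching free list and record its virtual address in the SPT
 * slot so the lookup helpers above can translate hardware cookies back
 * into host descriptors.
 */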
1408 static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
1409 {
1410         struct ath12k_dp *dp = &ab->dp;
1411         struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
1412         struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
1413         u32 i, j, pool_id, tx_spt_page;
1414         u32 ppt_idx, cookie_ppt_idx;
1415
1416         spin_lock_bh(&dp->rx_desc_lock);
1417
1418         /* SPT pages at ATH12K_RX_SPT_PAGE_OFFSET onwards are used for RX */
1419         for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
1420                 rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
1421                                    GFP_ATOMIC);
1422
1423                 if (!rx_descs) {
1424                         spin_unlock_bh(&dp->rx_desc_lock);
1425                         return -ENOMEM;
1426                 }
1427
1428                 ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
1429                 cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
1430                 dp->rxbaddr[i] = &rx_descs[0];
1431
1432                 for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1433                         rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
1434                         rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
1435                         list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
1436
1437                         /* Update descriptor VA in SPT */
1438                         rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
1439                         *rx_desc_addr = &rx_descs[j];
1440                 }
1441         }
1442
1443         spin_unlock_bh(&dp->rx_desc_lock);
1444
1445         for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
1446                 spin_lock_bh(&dp->tx_desc_lock[pool_id]);
1447                 for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
1448                         tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
1449                                            GFP_ATOMIC);
1450
1451                         if (!tx_descs) {
1452                                 spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1453                                 /* Caller takes care of TX pending and RX desc cleanup */
1454                                 return -ENOMEM;
1455                         }
1456
1457                         tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
1458                         ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
1459
1460                         dp->txbaddr[tx_spt_page] = &tx_descs[0];
1461
1462                         for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1463                                 tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
1464                                 tx_descs[j].pool_id = pool_id;
1465                                 list_add_tail(&tx_descs[j].list,
1466                                               &dp->tx_desc_free_list[pool_id]);
1467
1468                                 /* Update descriptor VA in SPT */
1469                                 tx_desc_addr =
1470                                         ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
1471                                 *tx_desc_addr = &tx_descs[j];
1472                         }
1473                 }
1474                 spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1475         }
1476         return 0;
1477 }
1478
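/* Program the primary page table that resides in target CMEM: one entry
 * per SPT page, holding that page's DMA address right-shifted by
 * ATH12K_SPT_4K_ALIGN_OFFSET (the pages are 4 KiB aligned, so the low
 * bits carry no information). RX entries are additionally offset by the
 * per-device rx_ppt_base.
 */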
1479 static int ath12k_dp_cmem_init(struct ath12k_base *ab,
1480                                struct ath12k_dp *dp,
1481                                enum ath12k_dp_desc_type type)
1482 {
1483         u32 cmem_base;
1484         int i, start, end;
1485
1486         cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
1487
1488         switch (type) {
1489         case ATH12K_DP_TX_DESC:
1490                 start = ATH12K_TX_SPT_PAGE_OFFSET;
1491                 end = start + ATH12K_NUM_TX_SPT_PAGES;
1492                 break;
1493         case ATH12K_DP_RX_DESC:
1494                 cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
1495                 start = ATH12K_RX_SPT_PAGE_OFFSET;
1496                 end = start + ATH12K_NUM_RX_SPT_PAGES;
1497                 break;
1498         default:
1499                 ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
1500                 return -EINVAL;
1501         }
1502
1503         /* Write to PPT in CMEM */
1504         for (i = start; i < end; i++)
1505                 ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
1506                                    dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
1507
1508         return 0;
1509 }
1510
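/* Cookie conversion bring-up, in dependency order: initialise the free
 * lists and locks, allocate the DMA-coherent SPT pages (verified to be
 * 4 KiB aligned below), point the CMEM PPT at them for TX and RX, then
 * populate the descriptor pools. Any failure unwinds through
 * ath12k_dp_cc_cleanup().
 */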
1511 static int ath12k_dp_cc_init(struct ath12k_base *ab)
1512 {
1513         struct ath12k_dp *dp = &ab->dp;
1514         int i, ret = 0;
1515
1516         INIT_LIST_HEAD(&dp->rx_desc_free_list);
1517         spin_lock_init(&dp->rx_desc_lock);
1518
1519         for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
1520                 INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
1521                 INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
1522                 spin_lock_init(&dp->tx_desc_lock[i]);
1523         }
1524
1525         dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
1526         if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
1527                 dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
1528
1529         dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
1530                                GFP_KERNEL);
1531
1532         if (!dp->spt_info) {
1533                 ath12k_warn(ab, "failed to allocate SPT pages\n");
1534                 return -ENOMEM;
1535         }
1536
1537         dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
1538
1539         for (i = 0; i < dp->num_spt_pages; i++) {
1540                 dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
1541                                                            ATH12K_PAGE_SIZE,
1542                                                            &dp->spt_info[i].paddr,
1543                                                            GFP_KERNEL);
1544
1545                 if (!dp->spt_info[i].vaddr) {
1546                         ret = -ENOMEM;
1547                         goto free;
1548                 }
1549
1550                 if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
1551                         ath12k_warn(ab, "allocated SPT memory is not 4K aligned\n");
1552                         ret = -EINVAL;
1553                         goto free;
1554                 }
1555         }
1556
1557         ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
1558         if (ret) {
1559                 ath12k_warn(ab, "HW CC Tx cmem init failed: %d\n", ret);
1560                 goto free;
1561         }
1562
1563         ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
1564         if (ret) {
1565                 ath12k_warn(ab, "HW CC Rx cmem init failed: %d\n", ret);
1566                 goto free;
1567         }
1568
1569         ret = ath12k_dp_cc_desc_init(ab);
1570         if (ret) {
1571                 ath12k_warn(ab, "HW CC desc init failed: %d\n", ret);
1572                 goto free;
1573         }
1574
1575         return 0;
1576 free:
1577         ath12k_dp_cc_cleanup(ab);
1578         return ret;
1579 }
1580
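/* On targets with REOQ LUT support, allocate the lookup table
 * (dma_alloc_coherent() memory is already zeroed, so no __GFP_ZERO is
 * needed) and program its base address into the REO block; targets
 * without LUT support skip this entirely.
 */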
1581 static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
1582 {
1583         struct ath12k_dp *dp = &ab->dp;
1584
1585         if (!ab->hw_params->reoq_lut_support)
1586                 return 0;
1587
1588         dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
1589                                                 DP_REOQ_LUT_SIZE,
1590                                                 &dp->reoq_lut.paddr,
1591                                                 GFP_KERNEL);
1592         if (!dp->reoq_lut.vaddr) {
1593                 ath12k_warn(ab, "failed to allocate memory for reoq table\n");
1594                 return -ENOMEM;
1595         }
1596
1597         ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
1598                            dp->reoq_lut.paddr);
1599         return 0;
1600 }
1601
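/* Map this device instance to the WBM idle descriptor list return buffer
 * manager it should use; unexpected device ids warn and fall back to the
 * device-0 RBM.
 */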
1602 static enum hal_rx_buf_return_buf_manager
1603 ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
1604 {
1605         switch (ab->device_id) {
1606         case 0:
1607                 return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
1608         case 1:
1609                 return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
1610         case 2:
1611                 return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
1612         default:
1613                 ath12k_warn(ab, "invalid device id %d, falling back to default rbm\n",
1614                             ab->device_id);
1615                 WARN_ON(1);
1616                 return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
1617         }
1618 }
1619
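/* Allocate all SoC-level DP resources in dependency order: WBM idle ring
 * and link descriptors, cookie conversion, bank profiles, common SRNGs,
 * the REOQ LUT, per-ring TX completion status buffers, DSCP-TID maps and
 * finally the RX side. The failure labels unwind the steps that already
 * completed (modulo the tx_status FIXME below).
 */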
1620 int ath12k_dp_alloc(struct ath12k_base *ab)
1621 {
1622         struct ath12k_dp *dp = &ab->dp;
1623         struct hal_srng *srng = NULL;
1624         size_t size = 0;
1625         u32 n_link_desc = 0;
1626         int ret;
1627         int i;
1628
1629         dp->ab = ab;
1630
1631         INIT_LIST_HEAD(&dp->reo_cmd_list);
1632         INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1633         spin_lock_init(&dp->reo_cmd_lock);
1634
1635         dp->reo_cmd_cache_flush_count = 0;
1636         dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
1637
1638         ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
1639         if (ret) {
1640                 ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1641                 return ret;
1642         }
1643
1644         srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1645
1646         ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
1647                                         HAL_WBM_IDLE_LINK, srng, n_link_desc);
1648         if (ret) {
1649                 ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
1650                 return ret;
1651         }
1652
1653         ret = ath12k_dp_cc_init(ab);
1654         if (ret) {
1655                 ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
1656                 goto fail_link_desc_cleanup;
1657         }
1658
1659         ret = ath12k_dp_init_bank_profiles(ab);
1660         if (ret) {
1661                 ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
1662                 goto fail_hw_cc_cleanup;
1663         }
1664
1665         ret = ath12k_dp_srng_common_setup(ab);
1666         if (ret)
1667                 goto fail_dp_bank_profiles_cleanup;
1668
1669         size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
1670
1671         ret = ath12k_dp_reoq_lut_setup(ab);
1672         if (ret) {
1673                 ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
1674                 goto fail_cmn_srng_cleanup;
1675         }
1676
1677         for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
1678                 dp->tx_ring[i].tcl_data_ring_id = i;
1679
1680                 dp->tx_ring[i].tx_status_head = 0;
1681                 dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1682                 dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1683                 if (!dp->tx_ring[i].tx_status) {
1684                         ret = -ENOMEM;
1685                         /* FIXME: tx_status buffers allocated for earlier
1686                          * rings are leaked on this error path
1687                          */
1688                         goto fail_cmn_reoq_cleanup;
1689                 }
1690         }
1691
1692         for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1693                 ath12k_hal_tx_set_dscp_tid_map(ab, i);
1694
1695         ret = ath12k_dp_rx_alloc(ab);
1696         if (ret)
1697                 goto fail_dp_rx_free;
1698
1699         /* Init any SOC level resource for DP */
1700
1701         return 0;
1702
1703 fail_dp_rx_free:
1704         ath12k_dp_rx_free(ab);
1705
1706 fail_cmn_reoq_cleanup:
1707         ath12k_dp_reoq_lut_cleanup(ab);
1708
1709 fail_cmn_srng_cleanup:
1710         ath12k_dp_srng_common_cleanup(ab);
1711
1712 fail_dp_bank_profiles_cleanup:
1713         ath12k_dp_deinit_bank_profiles(ab);
1714
1715 fail_hw_cc_cleanup:
1716         ath12k_dp_cc_cleanup(ab);
1717
1718 fail_link_desc_cleanup:
1719         ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1720                                     HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1721
1722         return ret;
1723 }