// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/*
 * hl_hw_queue_add_ptr - add a value to a queue pi/ci pointer, with wrap-around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. The result wraps around at twice the queue length.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
        ptr += val;
        ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
        return ptr;
}

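/*
 * queue_free_slots - return the number of free slots in a queue
 *
 * @q: pointer to hl_hw_queue structure
 * @queue_len: the length of the queue
 *
 * pi and ci run from 0 to (2 * queue_len - 1), so the occupancy is
 * (pi - ci) modulo (2 * queue_len) and the free space is queue_len minus
 * that. For example, with queue_len = 256, pi = 10 and ci = 500, the delta
 * is -490, the queue holds 22 entries and 234 slots are free.
 */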
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
        int delta = (q->pi - q->ci);

        if (delta >= 0)
                return (queue_len - delta);
        else
                return (abs(delta) - queue_len);
}

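/*
 * hl_int_hw_queue_update_ci - update the ci of all internal queues of a CS
 *
 * @cs: pointer to the command submission that completed
 *
 * Advance the ci of every internal queue by the number of jobs the CS had
 * on that queue. Takes the H/W queues lock internally and does nothing if
 * the device is disabled.
 */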
void hl_int_hw_queue_update_ci(struct hl_cs *cs)
{
        struct hl_device *hdev = cs->ctx->hdev;
        struct hl_hw_queue *q;
        int i;

        hdev->asic_funcs->hw_queues_lock(hdev);

        if (hdev->disabled)
                goto out;

        q = &hdev->kernel_queues[0];
        for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
                if (q->queue_type == QUEUE_TYPE_INT) {
                        q->ci += cs->jobs_in_queue_cnt[i];
                        q->ci &= ((q->int_queue_len << 1) - 1);
                }
        }

out:
        hdev->asic_funcs->hw_queues_unlock(hdev);
}

/*
 * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
 *                                H/W queue.
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device specific
 * function to set the pi (and doorbell)
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
                        struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
        struct hl_bd *bd;

        bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
        bd += hl_pi_2_offset(q->pi);
        bd->ctl = cpu_to_le32(ctl);
        bd->len = cpu_to_le32(len);
        bd->ptr = cpu_to_le64(ptr);

        q->pi = hl_queue_inc_ptr(q->pi);
        hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}

/*
 * ext_queue_sanity_checks - perform some sanity checks on external queue
 *
 * @hdev              : pointer to hl_device structure
 * @q                 : pointer to hl_hw_queue structure
 * @num_of_entries    : how many entries to check for space
 * @reserve_cq_entry  : whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 *
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
                                struct hl_hw_queue *q, int num_of_entries,
                                bool reserve_cq_entry)
{
        atomic_t *free_slots =
                        &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
        int free_slots_cnt;

        /* Check we have enough space in the queue */
        free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

        if (free_slots_cnt < num_of_entries) {
                dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
                        q->hw_queue_id, num_of_entries);
                return -EAGAIN;
        }

        if (reserve_cq_entry) {
                /*
                 * Check we have enough space in the completion queue.
                 * Subtract num_of_entries from the free slots counter;
                 * atomic_add_negative() returns true if the result is
                 * negative, i.e. the CQ doesn't have room for all the new
                 * CBs and we won't get an ack on their completion. In that
                 * case, restore the counter and bail out.
                 */
                if (atomic_add_negative(num_of_entries * -1, free_slots)) {
                        dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
                                num_of_entries, q->hw_queue_id);
                        atomic_add(num_of_entries, free_slots);
                        return -EAGAIN;
                }
        }

        return 0;
}

/*
 * int_queue_sanity_checks - perform some sanity checks on internal queue
 *
 * @hdev              : pointer to hl_device structure
 * @q                 : pointer to hl_hw_queue structure
 * @num_of_entries    : how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 *
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
                                        struct hl_hw_queue *q,
                                        int num_of_entries)
{
        int free_slots_cnt;

        /* Check we have enough space in the queue */
        free_slots_cnt = queue_free_slots(q, q->int_queue_len);

        if (free_slots_cnt < num_of_entries) {
                dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
                        q->hw_queue_id, num_of_entries);
                return -EAGAIN;
        }

        return 0;
}

/*
 * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
 * @hdev: Pointer to hl_device structure.
 * @q: Pointer to hl_hw_queue structure.
 * @num_of_entries: How many entries to check for space.
 *
 * Perform the following:
 * - Make sure we have enough space in the completion queue.
 *   This check also ensures that there is enough space in the h/w queue, as
 *   both queues are of the same size.
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work).
 *
 * Both operations are done using the "free_slots_cnt" field of the completion
 * queue. The CI counters of the queue and the completion queue are not
 * needed/used for the H/W queue type.
 */
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
                                        int num_of_entries)
{
        atomic_t *free_slots =
                        &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;

        /*
         * Check we have enough space in the completion queue.
         * Subtract num_of_entries from the free slots counter;
         * atomic_add_negative() returns true if the result is negative,
         * i.e. the CQ is too full to accept all the new CBs. In that case,
         * restore the counter and bail out.
         */
        if (atomic_add_negative(num_of_entries * -1, free_slots)) {
                dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
                        num_of_entries, q->hw_queue_id);
                atomic_add(num_of_entries, free_slots);
                return -EAGAIN;
        }

        return 0;
}

/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue to send the CB to
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB, that must NOT generate a completion entry
 *
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
                                u32 cb_size, u64 cb_ptr)
{
        struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
        int rc = 0;

        /*
         * The CPU queue is a synchronous queue with an effective depth of
         * a single entry (although it is allocated with room for multiple
         * entries). Therefore, there is a different lock, called
         * send_cpu_message_lock, that serializes accesses to the CPU queue.
         * As a result, we don't need to lock the access to the entire H/W
         * queues module when submitting a JOB to the CPU queue
         */
        if (q->queue_type != QUEUE_TYPE_CPU)
                hdev->asic_funcs->hw_queues_lock(hdev);

        if (hdev->disabled) {
                rc = -EPERM;
                goto out;
        }

        /*
         * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue
         * type only during the init phase, when the queues are empty and
         * being tested, so there is no need for sanity checks.
         */
        if (q->queue_type != QUEUE_TYPE_HW) {
                rc = ext_queue_sanity_checks(hdev, q, 1, false);
                if (rc)
                        goto out;
        }

        ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
        if (q->queue_type != QUEUE_TYPE_CPU)
                hdev->asic_funcs->hw_queues_unlock(hdev);

        return rc;
}

/*
 * ext_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        struct hl_cq_entry cq_pkt;
        struct hl_cq *cq;
        u64 cq_addr;
        struct hl_cb *cb;
        u32 ctl;
        u32 len;
        u64 ptr;

        /*
         * Put the shadow-queue index (the current PI) inside the BD CTL so
         * the device knows what to write to the completion queue
         */
        ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

        cb = job->patched_cb;
        len = job->job_cb_size;
        ptr = cb->bus_address;

        cq_pkt.data = cpu_to_le32(
                                ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
                                        & CQ_ENTRY_SHADOW_INDEX_MASK) |
                                (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
                                (1 << CQ_ENTRY_READY_SHIFT));

        /*
         * No need to protect pi_offset because scheduling to the
         * H/W queues is done under the scheduler mutex
         *
         * No need to check if CQ is full because it was already
         * checked in ext_queue_sanity_checks
         */
        cq = &hdev->completion_queue[q->hw_queue_id];
        cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);

        hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
                                                cq_addr,
                                                le32_to_cpu(cq_pkt.data),
                                                q->hw_queue_id);

        q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

        cq->pi = hl_cq_inc_ptr(cq->pi);

        ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}

/*
 * int_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void int_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        struct hl_bd bd;
        __le64 *pi;

        bd.ctl = 0;
        bd.len = cpu_to_le32(job->job_cb_size);
        bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

        pi = (__le64 *) (uintptr_t) (q->kernel_address +
                ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));

        q->pi++;
        q->pi &= ((q->int_queue_len << 1) - 1);

        hdev->asic_funcs->pqe_write(hdev, pi, &bd);

        hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}

/*
 * hw_queue_schedule_job - submit a JOB to a H/W queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void hw_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        struct hl_cq *cq;
        u64 ptr;
        u32 offset, ctl, len;

        /*
         * Upon PQE completion, COMP_DATA is used as the write data to the
         * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
         * write address offset in the SM block (QMAN LBW message).
         * The write address offset is calculated as "COMP_OFFSET << 2".
         */
        offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
        ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
                ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);

        len = job->job_cb_size;

        /*
         * A patched CB is created only if a user CB was allocated by driver and
         * MMU is disabled. If MMU is enabled, the user CB should be used
         * instead. If the user CB wasn't allocated by driver, assume that it
         * holds an address.
         */
        if (job->patched_cb)
                ptr = job->patched_cb->bus_address;
        else if (job->is_kernel_allocated_cb)
                ptr = job->user_cb->bus_address;
        else
                ptr = (u64) (uintptr_t) job->user_cb;

        /*
         * No need to protect pi_offset because scheduling to the
         * H/W queues is done under the scheduler mutex
         *
         * No need to check if CQ is full because it was already
         * checked in hw_queue_sanity_checks
         */
        cq = &hdev->completion_queue[q->hw_queue_id];
        cq->pi = hl_cq_inc_ptr(cq->pi);

        ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}

/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 *
 * @cs: pointer to the CS
 *
 */
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
        struct hl_device *hdev = cs->ctx->hdev;
        struct hl_cs_job *job, *tmp;
        struct hl_hw_queue *q;
        int rc = 0, i, cq_cnt;

        hdev->asic_funcs->hw_queues_lock(hdev);

        if (hl_device_disabled_or_in_reset(hdev)) {
                dev_err(hdev->dev,
                        "device is disabled or in reset, CS rejected!\n");
                rc = -EPERM;
                goto out;
        }

        q = &hdev->kernel_queues[0];
        for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
                if (cs->jobs_in_queue_cnt[i]) {
                        switch (q->queue_type) {
                        case QUEUE_TYPE_EXT:
                                rc = ext_queue_sanity_checks(hdev, q,
                                                cs->jobs_in_queue_cnt[i], true);
                                break;
                        case QUEUE_TYPE_INT:
                                rc = int_queue_sanity_checks(hdev, q,
                                                cs->jobs_in_queue_cnt[i]);
                                break;
                        case QUEUE_TYPE_HW:
                                rc = hw_queue_sanity_checks(hdev, q,
                                                cs->jobs_in_queue_cnt[i]);
                                break;
                        default:
                                dev_err(hdev->dev, "Queue type %d is invalid\n",
                                        q->queue_type);
                                rc = -EINVAL;
                                break;
                        }

                        if (rc)
                                goto unroll_cq_resv;

                        if (q->queue_type == QUEUE_TYPE_EXT ||
                                        q->queue_type == QUEUE_TYPE_HW)
                                cq_cnt++;
                }
        }

        spin_lock(&hdev->hw_queues_mirror_lock);
        list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);

        /* Queue TDR if the CS is the first entry and if timeout is wanted */
        if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
                        (list_first_entry(&hdev->hw_queues_mirror_list,
                                        struct hl_cs, mirror_node) == cs)) {
                cs->tdr_active = true;
                schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
                spin_unlock(&hdev->hw_queues_mirror_lock);
        } else {
                spin_unlock(&hdev->hw_queues_mirror_lock);
        }

        if (!hdev->cs_active_cnt++) {
                struct hl_device_idle_busy_ts *ts;

                ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
                ts->busy_to_idle_ts = ktime_set(0, 0);
                ts->idle_to_busy_ts = ktime_get();
        }

        list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                switch (job->queue_type) {
                case QUEUE_TYPE_EXT:
                        ext_queue_schedule_job(job);
                        break;
                case QUEUE_TYPE_INT:
                        int_queue_schedule_job(job);
                        break;
                case QUEUE_TYPE_HW:
                        hw_queue_schedule_job(job);
                        break;
                default:
                        break;
                }

        cs->submitted = true;

        goto out;

unroll_cq_resv:
        q = &hdev->kernel_queues[0];
        for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
                if ((q->queue_type == QUEUE_TYPE_EXT ||
                                q->queue_type == QUEUE_TYPE_HW) &&
                                cs->jobs_in_queue_cnt[i]) {
                        atomic_t *free_slots =
                                &hdev->completion_queue[i].free_slots_cnt;
                        atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
                        cq_cnt--;
                }
        }

out:
        hdev->asic_funcs->hw_queues_unlock(hdev);

        return rc;
}

/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: the id of the queue whose ci should be incremented
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
        struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

        q->ci = hl_queue_inc_ptr(q->ci);
}

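/*
 * ext_and_cpu_queue_init - initialize an external or a CPU queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @is_cpu_queue: true for the CPU queue, false for an external queue
 *
 * Allocate the queue memory (from the CPU-accessible pool for the CPU queue,
 * otherwise as coherent DMA memory), allocate the shadow queue and reset the
 * pi/ci pointers.
 */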
static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
                                        bool is_cpu_queue)
{
        void *p;
        int rc;

        if (is_cpu_queue)
                p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
                                                        HL_QUEUE_SIZE_IN_BYTES,
                                                        &q->bus_address);
        else
                p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
                                                HL_QUEUE_SIZE_IN_BYTES,
                                                &q->bus_address,
                                                GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;

        q->kernel_address = (u64) (uintptr_t) p;

        q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
                                        sizeof(*q->shadow_queue),
                                        GFP_KERNEL);
        if (!q->shadow_queue) {
                dev_err(hdev->dev,
                        "Failed to allocate shadow queue for H/W queue %d\n",
                        q->hw_queue_id);
                rc = -ENOMEM;
                goto free_queue;
        }

        /* Make sure read/write pointers are initialized to start of queue */
        q->ci = 0;
        q->pi = 0;

        return 0;

free_queue:
        if (is_cpu_queue)
                hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address);
        else
                hdev->asic_funcs->asic_dma_free_coherent(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address,
                                        q->bus_address);

        return rc;
}

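/*
 * int_queue_init - initialize an internal queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 *
 * The memory of an internal queue is managed by the ASIC, so no allocation
 * is done here; fetch the queue base address and length from the ASIC and
 * reset the pi/ci pointers.
 */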
static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        void *p;

        p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
                                        &q->bus_address, &q->int_queue_len);
        if (!p) {
                dev_err(hdev->dev,
                        "Failed to get base address for internal queue %d\n",
                        q->hw_queue_id);
                return -EFAULT;
        }

        q->kernel_address = (u64) (uintptr_t) p;
        q->pi = 0;
        q->ci = 0;

        return 0;
}

static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        return ext_and_cpu_queue_init(hdev, q, true);
}

static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        return ext_and_cpu_queue_init(hdev, q, false);
}

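/*
 * hw_queue_init - initialize a H/W queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 *
 * Allocate coherent DMA memory for the queue and reset the pi/ci pointers.
 * Unlike external queues, no shadow queue is allocated.
 */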
static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        void *p;

        p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
                                                HL_QUEUE_SIZE_IN_BYTES,
                                                &q->bus_address,
                                                GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;

        q->kernel_address = (u64) (uintptr_t) p;

        /* Make sure read/write pointers are initialized to start of queue */
        q->ci = 0;
        q->pi = 0;

        return 0;
}

/*
 * queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
                        u32 hw_queue_id)
{
        int rc;

        BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);

        q->hw_queue_id = hw_queue_id;

        switch (q->queue_type) {
        case QUEUE_TYPE_EXT:
                rc = ext_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_INT:
                rc = int_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_CPU:
                rc = cpu_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_HW:
                rc = hw_queue_init(hdev, q);
                break;
        case QUEUE_TYPE_NA:
                q->valid = 0;
                return 0;
        default:
                dev_crit(hdev->dev, "wrong queue type %d during init\n",
                        q->queue_type);
                rc = -EINVAL;
                break;
        }

        if (rc)
                return rc;

        q->valid = 1;

        return 0;
}

/*
 * queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 *
 * Free the queue memory
 */
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
        if (!q->valid)
                return;

        /*
         * If we arrived here, there are no jobs waiting on this queue
         * so we can safely remove it.
         * This is because this function can only be called when:
         * 1. Either a context is deleted, which can only occur if all its
         *    jobs were finished
         * 2. A context wasn't able to be created due to failure or timeout,
         *    which means there are no jobs on the queue yet
         *
         * The only exceptions are the queues of the kernel context, but
         * if they are being destroyed, it means that the entire module is
         * being removed. If the module is removed, it means there is no open
         * user context. It also means that if a job was submitted by
         * the kernel driver (e.g. context creation), the job itself was
         * released by the kernel driver when a timeout occurred on its
         * Completion. Thus, we don't need to release it again.
         */

        if (q->queue_type == QUEUE_TYPE_INT)
                return;

        kfree(q->shadow_queue);

        if (q->queue_type == QUEUE_TYPE_CPU)
                hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address);
        else
                hdev->asic_funcs->asic_dma_free_coherent(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address,
                                        q->bus_address);
}

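/*
 * hl_hw_queues_create - create the kernel queues of a device
 *
 * @hdev: pointer to hl_device structure
 *
 * Allocate the kernel queues array and initialize each queue according to
 * its type, taken from the ASIC properties. On failure, tear down the
 * queues that were already initialized.
 */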
int hl_hw_queues_create(struct hl_device *hdev)
{
        struct asic_fixed_properties *asic = &hdev->asic_prop;
        struct hl_hw_queue *q;
        int i, rc, q_ready_cnt;

        hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
                                sizeof(*hdev->kernel_queues), GFP_KERNEL);

        if (!hdev->kernel_queues) {
                dev_err(hdev->dev, "Not enough memory for H/W queues\n");
                return -ENOMEM;
        }

        /* Initialize the H/W queues */
        for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
                        i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {

                q->queue_type = asic->hw_queues_props[i].type;
                rc = queue_init(hdev, q, i);
                if (rc) {
                        dev_err(hdev->dev,
                                "failed to initialize queue %d\n", i);
                        goto release_queues;
                }
        }

        return 0;

release_queues:
        for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
                queue_fini(hdev, q);

        kfree(hdev->kernel_queues);

        return rc;
}

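/*
 * hl_hw_queues_destroy - release all the kernel queues of a device
 *
 * @hdev: pointer to hl_device structure
 */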
void hl_hw_queues_destroy(struct hl_device *hdev)
{
        struct hl_hw_queue *q;
        int i;

        for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
                queue_fini(hdev, q);

        kfree(hdev->kernel_queues);
}

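/*
 * hl_hw_queue_reset - reset the pi/ci of all valid queues
 *
 * @hdev: pointer to hl_device structure
 * @hard_reset: whether this is a hard reset; on a soft reset the CPU queue
 *              is skipped
 */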
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
        struct hl_hw_queue *q;
        int i;

        for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
                if ((!q->valid) ||
                        ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
                        continue;
                q->pi = q->ci = 0;
        }
}