/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

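/*
 * Advance the runlist-IB write pointer (a dword index) by
 * increment_bytes, warning if the new position would overflow the
 * buffer.
 */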
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
                                unsigned int buffer_size_bytes)
{
        unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

        WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
             "Runlist IB overflow");
        *wptr = temp;
}

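/*
 * Build a PM4 type-3 packet header for the given opcode. Per the PM4
 * format, the count field holds the packet size in dwords minus two.
 */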
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
        union PM4_MES_TYPE_3_HEADER header;

        header.u32All = 0;
        header.opcode = opcode;
        header.count = packet_size / 4 - 2;
        header.type = PM4_TYPE_3;

        return header.u32All;
}

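/*
 * Calculate the runlist IB allocation size from the current process
 * and queue counts, and report whether the runlist is over-subscribed:
 * more processes than fit in one scheduling quantum, or more compute
 * queues than the HWS exposes. When over-subscribed, extra space is
 * reserved for a chaining RUN_LIST packet at the end of the IB.
 */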
static void pm_calc_rlib_size(struct packet_manager *pm,
                                unsigned int *rlib_size,
                                bool *over_subscription)
{
        unsigned int process_count, queue_count, compute_queue_count;
        unsigned int map_queue_size;
        unsigned int max_proc_per_quantum = 1;
        struct kfd_dev *dev = pm->dqm->dev;

        process_count = pm->dqm->processes_count;
        queue_count = pm->dqm->queue_count;
        compute_queue_count = queue_count - pm->dqm->sdma_queue_count;

        /* Check if there is over-subscription.
         * Note: the arbitration between the number of VMIDs and
         * hws_max_conc_proc has been done in kgd2kfd_device_init().
         */
        *over_subscription = false;

        if (dev->max_proc_per_quantum > 1)
                max_proc_per_quantum = dev->max_proc_per_quantum;

        if ((process_count > max_proc_per_quantum) ||
            compute_queue_count > get_queues_num(pm->dqm)) {
                *over_subscription = true;
                pr_debug("Over subscribed runlist\n");
        }

        map_queue_size = sizeof(struct pm4_mes_map_queues);
        /* Calculate the runlist IB allocation size. */
        *rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
                     queue_count * map_queue_size;

        /*
         * Increase the allocation size in case we need a chained runlist
         * when over-subscribed.
         */
        if (*over_subscription)
                *rlib_size += sizeof(struct pm4_mes_runlist);

        pr_debug("runlist ib size %d\n", *rlib_size);
}

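/*
 * Allocate the runlist IB from the device's GTT sub-allocator and
 * return its CPU pointer, GPU address and size. Only one IB may be
 * allocated at a time; pm_release_ib() frees it.
 */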
static int pm_allocate_runlist_ib(struct packet_manager *pm,
                                unsigned int **rl_buffer,
                                uint64_t *rl_gpu_buffer,
                                unsigned int *rl_buffer_size,
                                bool *is_over_subscription)
{
        int retval;

        if (WARN_ON(pm->allocated))
                return -EINVAL;

        pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

        retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
                                        &pm->ib_buffer_obj);

        if (retval) {
                pr_err("Failed to allocate runlist IB\n");
                return retval;
        }

        *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
        *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

        memset(*rl_buffer, 0, *rl_buffer_size);
        pm->allocated = true;
        return retval;
}

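/*
 * Write a RUN_LIST packet into buffer that points the HW scheduler at
 * the runlist IB at GPU address ib. When chain is true, the chain bit
 * makes the scheduler treat the IB as a chained continuation of the
 * current runlist instead of a new submission.
 */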
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
        struct pm4_mes_runlist *packet;
        int concurrent_proc_cnt = 0;
        struct kfd_dev *kfd = pm->dqm->dev;

        if (WARN_ON(!ib))
                return -EFAULT;

        /* Determine the number of processes to map together to HW:
         * it cannot exceed the number of VMIDs available to the
         * scheduler, and is the smaller of the number of processes in
         * the runlist and the kfd module parameter hws_max_conc_proc.
         * Note: the arbitration between the number of VMIDs and
         * hws_max_conc_proc has been done in kgd2kfd_device_init().
         */
        concurrent_proc_cnt = min(pm->dqm->processes_count,
                        kfd->max_proc_per_quantum);

        packet = (struct pm4_mes_runlist *)buffer;

        memset(buffer, 0, sizeof(struct pm4_mes_runlist));
        packet->header.u32All = build_pm4_header(IT_RUN_LIST,
                                                sizeof(struct pm4_mes_runlist));

        packet->bitfields4.ib_size = ib_size_in_dwords;
        packet->bitfields4.chain = chain ? 1 : 0;
        packet->bitfields4.offload_polling = 0;
        packet->bitfields4.valid = 1;
        packet->bitfields4.process_cnt = concurrent_proc_cnt;
        packet->ordinal2 = lower_32_bits(ib);
        packet->bitfields3.ib_base_hi = upper_32_bits(ib);

        return 0;
}

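/*
 * Write a MAP_PROCESS packet into buffer, describing one process to
 * the scheduler: PASID, page table base, SH_MEM configuration, and
 * its GDS/GWS/OAC allocations.
 */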
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
                                struct qcm_process_device *qpd)
{
        struct pm4_mes_map_process *packet;

        packet = (struct pm4_mes_map_process *)buffer;

        memset(buffer, 0, sizeof(struct pm4_mes_map_process));

        packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
                                        sizeof(struct pm4_mes_map_process));
        packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
        packet->bitfields2.process_quantum = 1;
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields3.page_table_base = qpd->page_table_base;
        packet->bitfields10.gds_size = qpd->gds_size;
        packet->bitfields10.num_gws = qpd->num_gws;
        packet->bitfields10.num_oac = qpd->num_oac;
        packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

        packet->sh_mem_config = qpd->sh_mem_config;
        packet->sh_mem_bases = qpd->sh_mem_bases;
        packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
        packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

        /* TODO: scratch support */
        packet->sh_hidden_private_base_vmid = 0;

        packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
        packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

        return 0;
}

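/*
 * Write a MAP_QUEUES packet for a single queue, selecting the engine
 * and queue type from the queue's properties (compute, debug interface
 * queue, or SDMA). Compute queues may be marked static; SDMA queues
 * never are.
 */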
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
                struct queue *q, bool is_static)
{
        struct pm4_mes_map_queues *packet;
        bool use_static = is_static;

        packet = (struct pm4_mes_map_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

        packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
                                        sizeof(struct pm4_mes_map_queues));
        packet->bitfields2.alloc_format =
                alloc_format__mes_map_queues__one_per_pipe_vi;
        packet->bitfields2.num_queues = 1;
        packet->bitfields2.queue_sel =
                queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

        packet->bitfields2.engine_sel =
                engine_sel__mes_map_queues__compute_vi;
        packet->bitfields2.queue_type =
                queue_type__mes_map_queues__normal_compute_vi;

        switch (q->properties.type) {
        case KFD_QUEUE_TYPE_COMPUTE:
                if (use_static)
                        packet->bitfields2.queue_type =
                queue_type__mes_map_queues__normal_latency_static_queue_vi;
                break;
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.queue_type =
                        queue_type__mes_map_queues__debug_interface_queue_vi;
                break;
        case KFD_QUEUE_TYPE_SDMA:
                packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
                                engine_sel__mes_map_queues__sdma0_vi;
                use_static = false; /* no static queues under SDMA */
                break;
        default:
                WARN(1, "queue type %d", q->properties.type);
                return -EINVAL;
        }
        packet->bitfields3.doorbell_offset =
                        q->properties.doorbell_off;

        packet->mqd_addr_lo =
                        lower_32_bits(q->gart_mqd_addr);

        packet->mqd_addr_hi =
                        upper_32_bits(q->gart_mqd_addr);

        packet->wptr_addr_lo =
                        lower_32_bits((uint64_t)q->properties.write_ptr);

        packet->wptr_addr_hi =
                        upper_32_bits((uint64_t)q->properties.write_ptr);

        return 0;
}

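/*
 * Build the complete runlist IB: a MAP_PROCESS packet per process,
 * each followed by MAP_QUEUES packets for its active kernel and user
 * queues. If the runlist is over-subscribed, a chained RUN_LIST packet
 * is appended so the scheduler loops back to the start of the IB.
 */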
static int pm_create_runlist_ib(struct packet_manager *pm,
                                struct list_head *queues,
                                uint64_t *rl_gpu_addr,
                                size_t *rl_size_bytes)
{
        unsigned int alloc_size_bytes;
        unsigned int *rl_buffer, rl_wptr, i;
        int retval, processes_mapped;
        struct device_process_node *cur;
        struct qcm_process_device *qpd;
        struct queue *q;
        struct kernel_queue *kq;
        bool is_over_subscription;

        rl_wptr = retval = processes_mapped = 0;

        retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
                                &alloc_size_bytes, &is_over_subscription);
        if (retval)
                return retval;

        *rl_size_bytes = alloc_size_bytes;
        pm->ib_size_bytes = alloc_size_bytes;

        pr_debug("Building runlist ib: process count %d, queue count %d\n",
                pm->dqm->processes_count, pm->dqm->queue_count);

        /* Build the runlist IB packets. */
        list_for_each_entry(cur, queues, list) {
                qpd = cur->qpd;
                /* Build map process packet. */
                if (processes_mapped >= pm->dqm->processes_count) {
                        pr_debug("Not enough space left in runlist IB\n");
                        pm_release_ib(pm);
                        return -ENOMEM;
                }

                retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
                if (retval)
                        return retval;

                processes_mapped++;
                inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
                                alloc_size_bytes);

                list_for_each_entry(kq, &qpd->priv_queue_list, list) {
                        if (!kq->queue->properties.is_active)
                                continue;

                        pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
                                kq->queue->queue, qpd->is_debug);

                        retval = pm_create_map_queue(pm,
                                                &rl_buffer[rl_wptr],
                                                kq->queue,
                                                qpd->is_debug);
                        if (retval)
                                return retval;

                        inc_wptr(&rl_wptr,
                                sizeof(struct pm4_mes_map_queues),
                                alloc_size_bytes);
                }

                list_for_each_entry(q, &qpd->queues_list, list) {
                        if (!q->properties.is_active)
                                continue;

                        pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
                                q->queue, qpd->is_debug);

                        retval = pm_create_map_queue(pm,
                                                &rl_buffer[rl_wptr],
                                                q,
                                                qpd->is_debug);

                        if (retval)
                                return retval;

                        inc_wptr(&rl_wptr,
                                sizeof(struct pm4_mes_map_queues),
                                alloc_size_bytes);
                }
        }

        pr_debug("Finished mapping processes and queues to runlist\n");

        if (is_over_subscription)
                retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
                                        *rl_gpu_addr,
                                        alloc_size_bytes / sizeof(uint32_t),
                                        true);

        for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
                pr_debug("0x%2X ", rl_buffer[i]);
        pr_debug("\n");

        return retval;
}

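/*
 * Initialize the packet manager and the HIQ kernel queue it submits
 * packets through. A sketch of the expected call sequence (caller-side
 * names are illustrative, not part of this file):
 *
 *      struct packet_manager pm;
 *
 *      pm_init(&pm, dqm);
 *      pm_send_set_resources(&pm, &res);
 *      pm_send_runlist(&pm, &dqm_queue_list);
 *      ...
 *      pm_release_ib(&pm);
 *      pm_uninit(&pm);
 */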
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
        pm->dqm = dqm;
        mutex_init(&pm->lock);
        pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
        if (!pm->priv_queue) {
                mutex_destroy(&pm->lock);
                return -ENOMEM;
        }
        pm->allocated = false;

        return 0;
}

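/* Tear down the packet manager's lock and its HIQ kernel queue. */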
void pm_uninit(struct packet_manager *pm)
{
        mutex_destroy(&pm->lock);
        kernel_queue_uninit(pm->priv_queue);
}

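/*
 * Send a SET_RESOURCES packet over the HIQ, granting the HW scheduler
 * its VMID mask, queue mask, unmap latency, and GDS/GWS/OAC resources.
 */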
int pm_send_set_resources(struct packet_manager *pm,
                                struct scheduling_resources *res)
{
        struct pm4_mes_set_resources *packet;
        int retval = 0;

        mutex_lock(&pm->lock);
        pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
                                        sizeof(*packet) / sizeof(uint32_t),
                                        (unsigned int **)&packet);
        if (!packet) {
                pr_err("Failed to allocate buffer on kernel queue\n");
                retval = -ENOMEM;
                goto out;
        }

        memset(packet, 0, sizeof(struct pm4_mes_set_resources));
        packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
                                        sizeof(struct pm4_mes_set_resources));

        packet->bitfields2.queue_type =
                        queue_type__mes_set_resources__hsa_interface_queue_hiq;
        packet->bitfields2.vmid_mask = res->vmid_mask;
        packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
        packet->bitfields7.oac_mask = res->oac_mask;
        packet->bitfields8.gds_heap_base = res->gds_heap_base;
        packet->bitfields8.gds_heap_size = res->gds_heap_size;

        packet->gws_mask_lo = lower_32_bits(res->gws_mask);
        packet->gws_mask_hi = upper_32_bits(res->gws_mask);

        packet->queue_mask_lo = lower_32_bits(res->queue_mask);
        packet->queue_mask_hi = upper_32_bits(res->queue_mask);

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

out:
        mutex_unlock(&pm->lock);

        return retval;
}

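/*
 * Build a runlist IB for all processes and queues in dqm_queues, then
 * submit a RUN_LIST packet over the HIQ pointing the scheduler at it.
 */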
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
        uint64_t rl_gpu_ib_addr;
        uint32_t *rl_buffer;
        size_t rl_ib_size, packet_size_dwords;
        int retval;

        retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
                                        &rl_ib_size);
        if (retval)
                goto fail_create_runlist_ib;

        pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

        packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
        mutex_lock(&pm->lock);

        retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
                                        packet_size_dwords, &rl_buffer);
        if (retval)
                goto fail_acquire_packet_buffer;

        retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
                                        rl_ib_size / sizeof(uint32_t), false);
        if (retval)
                goto fail_create_runlist;

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

        mutex_unlock(&pm->lock);

        return retval;

fail_create_runlist:
        pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
fail_create_runlist_ib:
        pm_release_ib(pm);
        return retval;
}

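/*
 * Send a QUERY_STATUS packet asking the scheduler to write fence_value
 * to fence_address once preceding packets have been processed, so the
 * driver can poll the fence to detect completion.
 */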
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
                        uint32_t fence_value)
{
        int retval;
        struct pm4_mes_query_status *packet;

        if (WARN_ON(!fence_address))
                return -EFAULT;

        mutex_lock(&pm->lock);
        retval = pm->priv_queue->ops.acquire_packet_buffer(
                        pm->priv_queue,
                        sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
                        (unsigned int **)&packet);
        if (retval)
                goto fail_acquire_packet_buffer;

        packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
                                        sizeof(struct pm4_mes_query_status));

        packet->bitfields2.context_id = 0;
        packet->bitfields2.interrupt_sel =
                        interrupt_sel__mes_query_status__completion_status;
        packet->bitfields2.command =
                        command__mes_query_status__fence_only_after_write_ack;

        packet->addr_hi = upper_32_bits((uint64_t)fence_address);
        packet->addr_lo = lower_32_bits((uint64_t)fence_address);
        packet->data_hi = upper_32_bits((uint64_t)fence_value);
        packet->data_lo = lower_32_bits((uint64_t)fence_value);

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
        return retval;
}

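/*
 * Send an UNMAP_QUEUES packet that preempts (or resets, when reset is
 * true) queues on the given engine, selected by filter: a single queue
 * by doorbell offset, all queues of a PASID, all queues, or all
 * dynamic (non-static) queues.
 */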
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter filter,
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine)
{
        int retval;
        uint32_t *buffer;
        struct pm4_mes_unmap_queues *packet;

        mutex_lock(&pm->lock);
        retval = pm->priv_queue->ops.acquire_packet_buffer(
                        pm->priv_queue,
                        sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
                        &buffer);
        if (retval)
                goto err_acquire_packet_buffer;

        packet = (struct pm4_mes_unmap_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
        pr_debug("static_queue: unmapping queues: filter is %d, reset is %d, type is %d\n",
                filter, reset, type);
        packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
                                        sizeof(struct pm4_mes_unmap_queues));
        switch (type) {
        case KFD_QUEUE_TYPE_COMPUTE:
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__compute;
                break;
        case KFD_QUEUE_TYPE_SDMA:
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
                break;
        default:
                WARN(1, "queue type %d", type);
                retval = -EINVAL;
                goto err_invalid;
        }

        if (reset)
                packet->bitfields2.action =
                                action__mes_unmap_queues__reset_queues;
        else
                packet->bitfields2.action =
                                action__mes_unmap_queues__preempt_queues;

        switch (filter) {
        case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
                packet->bitfields2.queue_sel =
                                queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
                packet->bitfields2.num_queues = 1;
                packet->bitfields3b.doorbell_offset0 = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
                packet->bitfields2.queue_sel =
                                queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
                packet->bitfields3a.pasid = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
                packet->bitfields2.queue_sel =
                                queue_sel__mes_unmap_queues__unmap_all_queues;
                break;
        case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
                /* In this case, we do not preempt static queues. */
                packet->bitfields2.queue_sel =
                                queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
                break;
        default:
                WARN(1, "filter %d", filter);
                retval = -EINVAL;
                goto err_invalid;
        }

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

        mutex_unlock(&pm->lock);
        return 0;

err_invalid:
        pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
        return retval;
}

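/* Free the runlist IB, if one is allocated. */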
void pm_release_ib(struct packet_manager *pm)
{
        mutex_lock(&pm->lock);
        if (pm->allocated) {
                kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
                pm->allocated = false;
        }
        mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

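/* Dump the active runlist IB, if any, as a hex dump to debugfs. */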
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
        struct packet_manager *pm = data;

        mutex_lock(&pm->lock);

        if (!pm->allocated) {
                seq_puts(m, "  No active runlist\n");
                goto out;
        }

        seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
                     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
        mutex_unlock(&pm->lock);
        return 0;
}

#endif