/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
        (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

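/*
 * Worked example (under the assumption that
 * ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN) evaluates to 64):
 * the inline size becomes 1024 - 64 = 960 bytes, a struct
 * vmw_cmdbuf_dheader (header plus inline space, see below) then occupies
 * exactly 1024 bytes, and a 4096-byte page holds four such DMA pool
 * allocations with no waste.
 */
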
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block submission of new command buffers to
 * hardware on this context, for example during error recovery.
 */
struct vmw_cmdbuf_context {
        struct list_head submitted;
        struct list_head hw_submitted;
        struct list_head preempted;
        unsigned num_hw_submitted;
        bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
        struct mutex cur_mutex;
        struct mutex space_mutex;
        struct mutex error_mutex;
        struct work_struct work;
        struct vmw_private *dev_priv;
        struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
        struct list_head error;
        struct drm_mm mm;
        struct ttm_buffer_object *cmd_space;
        struct ttm_bo_kmap_obj map_obj;
        u8 *map;
        struct vmw_cmdbuf_header *cur;
        size_t cur_pos;
        size_t default_size;
        unsigned max_hw_submitted;
        spinlock_t lock;
        struct dma_pool *headers;
        struct dma_pool *dheaders;
        wait_queue_head_t alloc_queue;
        wait_queue_head_t idle_queue;
        bool irq_on;
        bool using_mob;
        bool has_pool;
        dma_addr_t handle;
        size_t size;
        u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
        struct vmw_cmdbuf_man *man;
        SVGACBHeader *cb_header;
        SVGACBContext cb_context;
        struct list_head list;
        struct drm_mm_node node;
        dma_addr_t handle;
        u8 *cmd;
        size_t size;
        size_t reserved;
        bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
        SVGACBHeader cb_header;
        u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
        size_t page_size;
        struct drm_mm_node *node;
        bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)                             \
        for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
             ++(_i), ++(_ctx))

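/*
 * Usage sketch for for_each_cmdbuf_ctx() (hypothetical caller; mirrors
 * the initialization loop in vmw_cmdbuf_man_create() below):
 *
 *	u32 i;
 *	struct vmw_cmdbuf_context *ctx;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		vmw_cmdbuf_ctx_init(ctx);
 */
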
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted while waiting.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
        if (interruptible) {
                if (mutex_lock_interruptible(&man->cur_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->cur_mutex);
        }

        return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
        mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_dheader *dheader;

        if (WARN_ON_ONCE(!header->inline_space))
                return;

        dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
                               cb_header);
        dma_pool_free(header->man->dheaders, dheader, header->handle);
        kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        lockdep_assert_held_once(&man->lock);

        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }

        drm_mm_remove_node(&header->node);
        wake_up_all(&man->alloc_queue);
        if (header->cb_header)
                dma_pool_free(man->headers, header->cb_header,
                              header->handle);
        kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        /* Avoid locking if inline_space */
        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }
        spin_lock(&man->lock);
        __vmw_cmdbuf_header_free(header);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 *
 * Returns the current status of the command buffer header as set by the
 * device.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;
        u32 val;

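        /*
         * Hand the 64-bit DMA address of the header to the device: write
         * the upper half to SVGA_REG_COMMAND_HIGH first, then the lower
         * half, with the context id folded into the low bits of the
         * aligned address, to SVGA_REG_COMMAND_LOW, which starts device
         * processing of the buffer.
         */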
        val = upper_32_bits(header->handle);
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

        val = lower_32_bits(header->handle);
        val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

        return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
        INIT_LIST_HEAD(&ctx->hw_submitted);
        INIT_LIST_HEAD(&ctx->submitted);
        INIT_LIST_HEAD(&ctx->preempted);
        ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
                                  struct vmw_cmdbuf_context *ctx)
{
        while (ctx->num_hw_submitted < man->max_hw_submitted &&
               !list_empty(&ctx->submitted) &&
               !ctx->block_submission) {
                struct vmw_cmdbuf_header *entry;
                SVGACBStatus status;

                entry = list_first_entry(&ctx->submitted,
                                         struct vmw_cmdbuf_header,
                                         list);

                status = vmw_cmdbuf_header_submit(entry);

                /* This should never happen */
                if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        break;
                }

                list_del(&entry->list);
                list_add_tail(&entry->list, &ctx->hw_submitted);
                ctx->num_hw_submitted++;
        }
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers on its
 * submitted queue after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers: typically freeing them, but on preemption or error taking
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_context *ctx,
                                   int *notempty)
{
        struct vmw_cmdbuf_header *entry, *next;

        vmw_cmdbuf_ctx_submit(man, ctx);

        list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
                SVGACBStatus status = entry->cb_header->status;

                if (status == SVGA_CB_STATUS_NONE)
                        break;

                list_del(&entry->list);
                wake_up_all(&man->idle_queue);
                ctx->num_hw_submitted--;
                switch (status) {
                case SVGA_CB_STATUS_COMPLETED:
                        __vmw_cmdbuf_header_free(entry);
                        break;
                case SVGA_CB_STATUS_COMMAND_ERROR:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &man->error);
                        schedule_work(&man->work);
                        break;
                case SVGA_CB_STATUS_PREEMPTED:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &ctx->preempted);
                        break;
                case SVGA_CB_STATUS_CB_HEADER_ERROR:
                        WARN_ONCE(true, "Command buffer header error.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                default:
                        WARN_ONCE(true, "Undefined command buffer status.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                }
        }

        vmw_cmdbuf_ctx_submit(man, ctx);
        if (!list_empty(&ctx->submitted))
                (*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
        int notempty;
        struct vmw_cmdbuf_context *ctx;
        int i;

retry:
        notempty = 0;
        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);

        if (man->irq_on && !notempty) {
                vmw_generic_waiter_remove(man->dev_priv,
                                          SVGA_IRQFLAG_COMMAND_BUFFER,
                                          &man->dev_priv->cmdbuf_waiters);
                man->irq_on = false;
        } else if (!man->irq_on && notempty) {
                vmw_generic_waiter_add(man->dev_priv,
                                       SVGA_IRQFLAG_COMMAND_BUFFER,
                                       &man->dev_priv->cmdbuf_waiters);
                man->irq_on = true;

                /* Rerun in case we just missed an irq. */
                goto retry;
        }
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
                               struct vmw_cmdbuf_header *header,
                               SVGACBContext cb_context)
{
        if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
                header->cb_header->dxContext = 0;
        header->cb_context = cb_context;
        list_add_tail(&header->list, &man->ctx[cb_context].submitted);

        vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
        struct vmw_cmdbuf_man *man =
                container_of(work, struct vmw_cmdbuf_man, work);
        struct vmw_cmdbuf_header *entry, *next;
        uint32_t dummy;
        int notempty = 0;
        bool restart[SVGA_CB_CONTEXT_MAX];
        bool send_fence = false;
        struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
        int i;
        struct vmw_cmdbuf_context *ctx;
        bool global_block = false;

        for_each_cmdbuf_ctx(man, i, ctx) {
                INIT_LIST_HEAD(&restart_head[i]);
                restart[i] = false;
        }

        mutex_lock(&man->error_mutex);
        spin_lock(&man->lock);
        list_for_each_entry_safe(entry, next, &man->error, list) {
                SVGACBHeader *cb_hdr = entry->cb_header;
                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
                        (entry->cmd + cb_hdr->errorOffset);
                u32 error_cmd_size, new_start_offset;
                const char *cmd_name;

                list_del_init(&entry->list);
                restart[entry->cb_context] = true;
                global_block = true;

                if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
                        DRM_ERROR("Unknown command causing device error.\n");
                        DRM_ERROR("Command buffer offset is %lu\n",
                                  (unsigned long) cb_hdr->errorOffset);
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
                DRM_ERROR("Command buffer offset is %lu\n",
                          (unsigned long) cb_hdr->errorOffset);
                DRM_ERROR("Command size is %lu\n",
                          (unsigned long) error_cmd_size);

                new_start_offset = cb_hdr->errorOffset + error_cmd_size;

                if (new_start_offset >= cb_hdr->length) {
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                if (man->using_mob)
                        cb_hdr->ptr.mob.mobOffset += new_start_offset;
                else
                        cb_hdr->ptr.pa += (u64) new_start_offset;

                entry->cmd += new_start_offset;
                cb_hdr->length -= new_start_offset;
                cb_hdr->errorOffset = 0;
                cb_hdr->offset = 0;

                list_add_tail(&entry->list, &restart_head[entry->cb_context]);
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                man->ctx[i].block_submission = true;

        spin_unlock(&man->lock);

        /* Preempt all contexts */
        if (global_block && vmw_cmdbuf_preempt(man, 0))
                DRM_ERROR("Failed preempting command buffer contexts\n");

        spin_lock(&man->lock);
        for_each_cmdbuf_ctx(man, i, ctx) {
                /* Move preempted command buffers to the preempted queue. */
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);

                /*
                 * Add the preempted queue after the command buffer
                 * that caused an error.
                 */
                list_splice_init(&ctx->preempted, restart_head[i].prev);

                /*
                 * Finally add all command buffers first in the submitted
                 * queue, to rerun them.
                 */
                ctx->block_submission = false;
                list_splice_init(&restart_head[i], &ctx->submitted);
        }

        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);

        if (global_block && vmw_cmdbuf_startstop(man, 0, true))
                DRM_ERROR("Failed restarting command buffer contexts\n");

        /* Send a new fence in case one was removed */
        if (send_fence) {
                vmw_fifo_send_fence(man->dev_priv, &dummy);
                wake_up_all(&man->idle_queue);
        }

        mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
                                bool check_preempted)
{
        struct vmw_cmdbuf_context *ctx;
        bool idle = false;
        int i;

        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (!list_empty(&ctx->submitted) ||
                    !list_empty(&ctx->hw_submitted) ||
                    (check_preempted && !list_empty(&ctx->preempted)))
                        goto out_unlock;
        }

        idle = list_empty(&man->error);

out_unlock:
        spin_unlock(&man->lock);

        return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        WARN_ON(!mutex_is_locked(&man->cur_mutex));

        if (!cur)
                return;

        spin_lock(&man->lock);
        if (man->cur_pos == 0) {
                __vmw_cmdbuf_header_free(cur);
                goto out_unlock;
        }

        man->cur->cb_header->length = man->cur_pos;
        vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
        spin_unlock(&man->lock);
        man->cur = NULL;
        man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible when sleeping.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                         bool interruptible)
{
        int ret = vmw_cmdbuf_cur_lock(man, interruptible);

        if (ret)
                return ret;

        __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);

        return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                    unsigned long timeout)
{
        int ret;

        ret = vmw_cmdbuf_cur_flush(man, interruptible);
        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                ret = wait_event_interruptible_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        } else {
                ret = wait_event_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);
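        /*
         * wait_event*_timeout() returns 0 on timeout, a positive value if
         * the condition was met, and (for the interruptible variant) a
         * negative error code if interrupted. On timeout, re-check
         * idleness once more before reporting -EBUSY.
         */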
        if (ret == 0) {
                if (!vmw_cmdbuf_man_idle(man, true))
                        ret = -EBUSY;
                else
                        ret = 0;
        }
        if (ret > 0)
                ret = 0;

        return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if succeeded.
 * This function is also used as the wait_event() predicate in
 * vmw_cmdbuf_alloc_space(); the early return on @info->done makes it safe to
 * re-evaluate after the allocation has already succeeded.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_alloc_info *info)
{
        int ret;

        if (info->done)
                return true;

        memset(info->node, 0, sizeof(*info->node));
        spin_lock(&man->lock);
        ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        if (ret) {
                vmw_cmdbuf_man_process(man);
                ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        }

        spin_unlock(&man->lock);
        info->done = !ret;

        return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
                                  struct drm_mm_node *node,
                                  size_t size,
                                  bool interruptible)
{
        struct vmw_cmdbuf_alloc_info info;

        info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
        info.node = node;
        info.done = false;

        /*
         * To prevent starvation of large requests, only one allocating call
         * at a time waiting for space.
         */
        if (interruptible) {
                if (mutex_lock_interruptible(&man->space_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->space_mutex);
        }

        /* Try to allocate space without waiting. */
        if (vmw_cmdbuf_try_alloc(man, &info))
                goto out_unlock;

        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                int ret;

                ret = wait_event_interruptible
                        (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
                if (ret) {
                        vmw_generic_waiter_remove
                                (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                                 &man->dev_priv->cmdbuf_waiters);
                        mutex_unlock(&man->space_mutex);
                        return ret;
                }
        } else {
                wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);

out_unlock:
        mutex_unlock(&man->space_mutex);

        return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_header *header,
                                 size_t size,
                                 bool interruptible)
{
        SVGACBHeader *cb_hdr;
        size_t offset;
        int ret;

        if (!man->has_pool)
                return -ENOMEM;

        ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

        if (ret)
                return ret;

        header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
                                            &header->handle);
        if (!header->cb_header) {
                ret = -ENOMEM;
                goto out_no_cb_header;
        }

        header->size = header->node.size << PAGE_SHIFT;
        cb_hdr = header->cb_header;
        offset = header->node.start << PAGE_SHIFT;
        header->cmd = man->map + offset;
        if (man->using_mob) {
                cb_hdr->flags = SVGA_CB_FLAG_MOB;
                cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
                cb_hdr->ptr.mob.mobOffset = offset;
        } else {
                cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
        }

        return 0;

out_no_cb_header:
        spin_lock(&man->lock);
        drm_mm_remove_node(&header->node);
        spin_unlock(&man->lock);

        return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_header *header,
                                   int size)
{
        struct vmw_cmdbuf_dheader *dheader;
        SVGACBHeader *cb_hdr;

        if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
                return -ENOMEM;

        dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
                                  &header->handle);
        if (!dheader)
                return -ENOMEM;

        header->inline_space = true;
        header->size = VMW_CMDBUF_INLINE_SIZE;
        cb_hdr = &dheader->cb_header;
        header->cb_header = cb_hdr;
        header->cmd = dheader->cmd;
        cb_hdr->status = SVGA_CB_STATUS_NONE;
        cb_hdr->flags = SVGA_CB_FLAG_NONE;
        cb_hdr->ptr.pa = (u64)header->handle +
                (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

        return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                       size_t size, bool interruptible,
                       struct vmw_cmdbuf_header **p_header)
{
        struct vmw_cmdbuf_header *header;
        int ret = 0;

        *p_header = NULL;

        header = kzalloc(sizeof(*header), GFP_KERNEL);
        if (!header)
                return ERR_PTR(-ENOMEM);

        if (size <= VMW_CMDBUF_INLINE_SIZE)
                ret = vmw_cmdbuf_space_inline(man, header, size);
        else
                ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

        if (ret) {
                kfree(header);
                return ERR_PTR(ret);
        }

        header->man = man;
        INIT_LIST_HEAD(&header->list);
        header->cb_header->status = SVGA_CB_STATUS_NONE;
        *p_header = header;

        return header->cmd;
}

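/*
 * Usage sketch (hypothetical caller; error handling abbreviated): allocate
 * a dedicated command buffer, reserve space in it, write the commands and
 * commit. The names size_of_commands and build_commands() are purely
 * illustrative.
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, size_of_commands, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, size_of_commands, SVGA3D_INVALID_ID,
 *				 true, header);
 *	build_commands(cmd);
 *	vmw_cmdbuf_commit(man, size_of_commands, header, true);
 */
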
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
                                    size_t size,
                                    int ctx_id,
                                    bool interruptible)
{
        struct vmw_cmdbuf_header *cur;
        void *ret;

        if (vmw_cmdbuf_cur_lock(man, interruptible))
                return ERR_PTR(-ERESTARTSYS);

        cur = man->cur;
        if (cur && (size + man->cur_pos > cur->size ||
                    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
                     ctx_id != cur->cb_header->dxContext)))
                __vmw_cmdbuf_cur_flush(man);

        if (!man->cur) {
                ret = vmw_cmdbuf_alloc(man,
                                       max_t(size_t, size, man->default_size),
                                       interruptible, &man->cur);
                if (IS_ERR(ret)) {
                        vmw_cmdbuf_cur_unlock(man);
                        return ret;
                }

                cur = man->cur;
        }

        if (ctx_id != SVGA3D_INVALID_ID) {
                cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                cur->cb_header->dxContext = ctx_id;
        }

        cur->reserved = size;

        return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
                                  size_t size, bool flush)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        WARN_ON(!mutex_is_locked(&man->cur_mutex));

        WARN_ON(size > cur->reserved);
        man->cur_pos += size;
        if (!size)
                cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                         int ctx_id, bool interruptible,
                         struct vmw_cmdbuf_header *header)
{
        if (!header)
                return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

        if (size > header->size)
                return ERR_PTR(-EINVAL);

        if (ctx_id != SVGA3D_INVALID_ID) {
                header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                header->cb_header->dxContext = ctx_id;
        }

        header->reserved = size;
        return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                       struct vmw_cmdbuf_header *header, bool flush)
{
        if (!header) {
                vmw_cmdbuf_commit_cur(man, size, flush);
                return;
        }

        (void) vmw_cmdbuf_cur_lock(man, false);
        __vmw_cmdbuf_cur_flush(man);
        WARN_ON(size > header->reserved);
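        /*
         * Make the committed buffer the new current buffer: any space
         * remaining in it may be reused by subsequent small submissions,
         * and it reaches the device on the next flush.
         */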
        man->cur = header;
        man->cur_pos = size;
        if (!size)
                header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

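/*
 * For small kernel submissions the "current" buffer path is used instead:
 * passing a NULL header reserves space in the manager-owned buffer
 * (hypothetical caller, error handling abbreviated; build_commands() is
 * purely illustrative):
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	build_commands(cmd);
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */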

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
                                          const void *command,
                                          size_t size)
{
        struct vmw_cmdbuf_header *header;
        int status;
        void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        memcpy(cmd, command, size);
        header->cb_header->length = size;
        header->cb_context = SVGA_CB_CONTEXT_DEVICE;
        spin_lock(&man->lock);
        status = vmw_cmdbuf_header_submit(header);
        spin_unlock(&man->lock);
        vmw_cmdbuf_header_free(header);

        if (status != SVGA_CB_STATUS_COMPLETED) {
                DRM_ERROR("Device context command failed with status %d\n",
                          status);
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to preempt.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
        struct {
                uint32 id;
                SVGADCCmdPreempt body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_PREEMPT;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;
        cmd.body.ignoreIDZero = 0;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start / stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable)
{
        struct {
                uint32 id;
                SVGADCCmdStartStop body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
        cmd.body.enable = (enable) ? 1 : 0;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
                             size_t size, size_t default_size)
{
        struct vmw_private *dev_priv = man->dev_priv;
        bool dummy;
        int ret;

        if (man->has_pool)
                return -EINVAL;

        /* First, try to allocate a huge chunk of DMA memory */
        size = PAGE_ALIGN(size);
        man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
                                      &man->handle, GFP_KERNEL);
        if (man->map) {
                man->using_mob = false;
        } else {
                /*
                 * DMA memory failed. If we can have command buffers in a
                 * MOB, try to use that instead. Note that this will
                 * actually call into the already enabled manager, when
                 * binding the MOB.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_DX))
                        return -ENOMEM;

                ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
                                    &vmw_mob_ne_placement, 0, false,
                                    &man->cmd_space);
                if (ret)
                        return ret;

                man->using_mob = true;
                ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
                                  &man->map_obj);
                if (ret)
                        goto out_no_map;

                man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
        }

        man->size = size;
        drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

        man->has_pool = true;

        /*
         * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
         * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
         * needs to wait for space and we block on further command
         * submissions to be able to free up space.
         */
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        DRM_INFO("Using command buffers with %s pool.\n",
                 (man->using_mob) ? "MOB" : "DMA");

        return 0;

out_no_map:
        if (man->using_mob)
                ttm_bo_unref(&man->cmd_space);

        return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
        struct vmw_cmdbuf_man *man;
        struct vmw_cmdbuf_context *ctx;
        unsigned int i;
        int ret;

        if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
                return ERR_PTR(-ENOSYS);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return ERR_PTR(-ENOMEM);

        man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
                2 : 1;
        man->headers = dma_pool_create("vmwgfx cmdbuf",
                                       &dev_priv->dev->pdev->dev,
                                       sizeof(SVGACBHeader),
                                       64, PAGE_SIZE);
        if (!man->headers) {
                ret = -ENOMEM;
                goto out_no_pool;
        }

        man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
                                        &dev_priv->dev->pdev->dev,
                                        sizeof(struct vmw_cmdbuf_dheader),
                                        64, PAGE_SIZE);
        if (!man->dheaders) {
                ret = -ENOMEM;
                goto out_no_dpool;
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_init(ctx);

        INIT_LIST_HEAD(&man->error);
        spin_lock_init(&man->lock);
        mutex_init(&man->cur_mutex);
        mutex_init(&man->space_mutex);
        mutex_init(&man->error_mutex);
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        init_waitqueue_head(&man->alloc_queue);
        init_waitqueue_head(&man->idle_queue);
        man->dev_priv = dev_priv;
        man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
        INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
                               &dev_priv->error_waiters);
        ret = vmw_cmdbuf_startstop(man, 0, true);
        if (ret) {
                DRM_ERROR("Failed starting command buffer contexts\n");
                vmw_cmdbuf_man_destroy(man);
                return ERR_PTR(ret);
        }

        return man;

out_no_dpool:
        dma_pool_destroy(man->headers);
out_no_pool:
        kfree(man);

        return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
        if (!man->has_pool)
                return;

        man->has_pool = false;
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);
        if (man->using_mob) {
                (void) ttm_bo_kunmap(&man->map_obj);
                ttm_bo_unref(&man->cmd_space);
        } else {
                dma_free_coherent(&man->dev_priv->dev->pdev->dev,
                                  man->size, man->map, man->handle);
        }
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
        WARN_ON_ONCE(man->has_pool);
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);

        if (vmw_cmdbuf_startstop(man, 0, false))
                DRM_ERROR("Failed stopping command buffer contexts.\n");

        vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
                                  &man->dev_priv->error_waiters);
        (void) cancel_work_sync(&man->work);
        dma_pool_destroy(man->dheaders);
        dma_pool_destroy(man->headers);
        mutex_destroy(&man->cur_mutex);
        mutex_destroy(&man->space_mutex);
        mutex_destroy(&man->error_mutex);
        kfree(man);
}