// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 */

#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT	HZ
#define FREE_URB_TIMEOUT (HZ*2)
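
/*
 * Fetch and parse the DisplayLink vendor-specific USB descriptor to
 * discover the SKU pixel-mode limit. An unrecognized descriptor is
 * logged but still lets the driver load; only allocation failure
 * returns false.
 */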
static int udl_parse_vendor_descriptor(struct drm_device *dev,
				       struct usb_device *usbdev)
{
	struct udl_device *udl = to_udl(dev);
	char *desc, *buf, *desc_end;
	u8 total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			 total_len, desc);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||      /* vendor descriptor type */
		    (desc[2] != 0x01) ||      /* version (2 bytes) */
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *)desc));
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area = le32_to_cpu(*((u32 *)desc));

				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}
/*
 * Need to ensure a channel is selected before submitting URBs
 */
static int udl_select_std_channel(struct udl_device *udl)
{
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};
	void *sendbuf;
	int ret;

	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udl->udev,
			      usb_sndctrlpipe(udl->udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);

	kfree(sendbuf);
	return ret < 0 ? ret : 0;
}
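
/* Workqueue callback that releases a URB-pool slot from process context. */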
static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}
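
/*
 * Bulk-out completion handler: log real errors, then return the URB to
 * the free list and release a slot on the limit semaphore.
 */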
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				  __func__, urb->status);
			atomic_set(&udl->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
		up(&udl->urbs.limit_sem);
}
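
/*
 * Tear down the URB pool: wait for each in-flight URB to complete, then
 * free its coherent buffer, the URB itself, and its list node.
 */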
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {
		down(&udl->urbs.limit_sem);

		spin_lock_irq(&udl->urbs.lock);
		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);
		spin_unlock_irq(&udl->urbs.lock);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}
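
/*
 * Pre-allocate a pool of 'count' bulk URBs with coherent transfer
 * buffers of 'size' bytes each. If a coherent buffer cannot be
 * obtained, retry with half the size (down to one page). Returns the
 * number of URBs actually allocated.
 */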
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;
	struct urb_node *unode;
	char *buf;
	size_t wanted_size = count * size;

	spin_lock_init(&udl->urbs.lock);

retry:
	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	sema_init(&udl->urbs.limit_sem, 0);
	udl->urbs.count = 0;
	udl->urbs.available = 0;

	while (udl->urbs.count * size < wanted_size) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			if (size > PAGE_SIZE) {
				size /= 2;
				udl_free_urb_list(dev);
				goto retry;
			}
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		up(&udl->urbs.limit_sem);
		udl->urbs.count++;
		udl->urbs.available++;
	}

	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

	return udl->urbs.count;
}
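
/*
 * Take a free URB off the list, waiting up to GET_URB_TIMEOUT for one
 * to become available. Returns NULL (and flags lost_pixels) on timeout.
 */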
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&udl->lost_pixels, 1);
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irq(&udl->urbs.lock);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irq(&udl->urbs.lock);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}
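
/*
 * Submit a URB carrying 'len' bytes of payload. On failure the URB is
 * recycled immediately via the completion handler and lost_pixels is
 * flagged.
 */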
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = to_udl(dev);
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		atomic_set(&udl->lost_pixels, 1);
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}
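
/*
 * One-time device init: parse the vendor descriptor, select the default
 * channel, allocate the URB pool, then bring up modesetting and fbdev.
 */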
int udl_init(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	int ret = -ENOMEM;

	mutex_init(&udl->gem_lock);

	if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (udl_select_std_channel(udl))
		DRM_ERROR("Selecting channel failed\n");

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	ret = udl_modeset_init(dev);
	if (ret)
		goto err;

	ret = udl_fbdev_init(dev);
	if (ret)
		goto err;

	drm_kms_helper_poll_init(dev);

	return 0;

err:
	if (udl->urbs.count)
		udl_free_urb_list(dev);
	DRM_ERROR("%d\n", ret);
	return ret;
}
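
/* Release the URB pool when the underlying USB device goes away. */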
int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}
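
/* Driver teardown: undo udl_init in reverse order. */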
void udl_fini(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	drm_kms_helper_poll_fini(dev);

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
}