/*
 * smscufx.c -- Framebuffer driver for SMSC UFX USB controller
 *
 * Copyright (C) 2011 Steve Glendinning <[email protected]>
 * Copyright (C) 2009 Roberto De Ioris <[email protected]>
 * Copyright (C) 2009 Jaya Kumar <[email protected]>
 * Copyright (C) 2009 Bernie Thompson <[email protected]>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 *
 * Based on udlfb, with work from Florian Echtler, Henrik Bjerregaard Pedersen,
 * and others.
 *
 * Works well with Bernie Thompson's X DAMAGE patch to xf86-video-fbdev
 * available from http://git.plugable.com
 *
 * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
 * usb-skeleton by GregKH.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "edid.h"

#define check_warn(status, fmt, args...) \
	({ if (status < 0) pr_warn(fmt, ##args); })

#define check_warn_return(status, fmt, args...) \
	({ if (status < 0) { pr_warn(fmt, ##args); return status; } })

#define check_warn_goto_error(status, fmt, args...) \
	({ if (status < 0) { pr_warn(fmt, ##args); goto error; } })

#define all_bits_set(x, bits) (((x) & (bits)) == (bits))

#define USB_VENDOR_REQUEST_WRITE_REGISTER	0xA0
#define USB_VENDOR_REQUEST_READ_REGISTER	0xA1

/*
 * TODO: Propose standard fb.h ioctl for reporting damage,
 * using _IOWR() and one of the existing area structs from fb.h
 * Consider these ioctls deprecated, but they're still used by the
 * DisplayLink X server as yet - need both to be modified in tandem
 * when new ioctl(s) are ready.
 */
#define UFX_IOCTL_RETURN_EDID	(0xAD)
#define UFX_IOCTL_REPORT_DAMAGE	(0xAA)
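
/* Illustrative userspace usage of the damage ioctl above (hypothetical
 * client code, not part of this driver), assuming fd is an open handle on
 * this device's /dev/fbN and struct dloarea matches the definition below:
 *
 *	struct dloarea a = { .x = 0, .y = 0, .w = 800, .h = 600 };
 *	ioctl(fd, UFX_IOCTL_REPORT_DAMAGE, &a);
 */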

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE (512)
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)

#define GET_URB_TIMEOUT (HZ)
#define FREE_URB_TIMEOUT (HZ*2)

#define BPP	2

#define UFX_DEFIO_WRITE_DELAY	5 /* fb_deferred_io.delay in jiffies */
#define UFX_DEFIO_WRITE_DISABLE	(HZ*60) /* "disable" with long delay */

struct dloarea {
	int x, y;
	int w, h;
};

struct urb_node {
	struct list_head entry;
	struct ufx_data *dev;
	struct delayed_work release_urb_work;
	struct urb *urb;
};

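/* Pool of pre-allocated bulk URBs for pixel transfers: free urbs are kept on
 * 'list' under 'lock', and 'limit_sem' bounds how many may be in flight at
 * once (WRITES_IN_FLIGHT). Summary inferred from the helpers declared below;
 * the allocation and submission routines appear later in the file. */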
struct urb_list {
	struct list_head list;
	spinlock_t lock;
	struct semaphore limit_sem;
	int available;
	int count;
	size_t size;
};

struct ufx_data {
	struct usb_device *udev;
	struct device *gdev; /* &udev->dev */
	struct fb_info *info;
	struct urb_list urbs;
	struct kref kref;
	int fb_count;
	bool virtualized; /* true when physical usb device not present */
	struct delayed_work free_framebuffer_work;
	atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
	atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
	u8 *edid; /* null until we read edid from hw or get from sysfs */
	size_t edid_size;
	u32 pseudo_palette[256];
};

static struct fb_fix_screeninfo ufx_fix = {
	.id =		"smscufx",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_TRUECOLOR,
	.xpanstep =	0,
	.ypanstep =	0,
	.ywrapstep =	0,
	.accel =	FB_ACCEL_NONE,
};

static const u32 smscufx_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
	FBINFO_VIRTFB | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
	FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;

static struct usb_device_id id_table[] = {
	{USB_DEVICE(0x0424, 0x9d00),},
	{USB_DEVICE(0x0424, 0x9d01),},
	{},
};
MODULE_DEVICE_TABLE(usb, id_table);

/* module options */
static bool console;   /* Optionally allow fbcon to consume first framebuffer */
static bool fb_defio = true;  /* Optionally enable fb_defio mmap support */

/* ufx keeps a list of urbs for efficient bulk transfers */
static void ufx_urb_completion(struct urb *urb);
static struct urb *ufx_get_urb(struct ufx_data *dev);
static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len);
static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size);
static void ufx_free_urb_list(struct ufx_data *dev);

/* reads a control register */
static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(4, GFP_KERNEL);
	int ret;

	BUG_ON(!dev);

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
		USB_VENDOR_REQUEST_READ_REGISTER,
		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		00, index, buf, 4, USB_CTRL_GET_TIMEOUT);

	le32_to_cpus(buf);
	*data = *buf;
	kfree(buf);

	if (unlikely(ret < 0))
		pr_warn("Failed to read register index 0x%08x\n", index);

	return ret;
}

/* writes a control register */
static int ufx_reg_write(struct ufx_data *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(4, GFP_KERNEL);
	int ret;

	BUG_ON(!dev);

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
		USB_VENDOR_REQUEST_WRITE_REGISTER,
		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		00, index, buf, 4, USB_CTRL_SET_TIMEOUT);

	kfree(buf);

	if (unlikely(ret < 0))
		pr_warn("Failed to write register index 0x%08x with value "
			"0x%08x\n", index, data);

	return ret;
}

static int ufx_reg_clear_and_set_bits(struct ufx_data *dev, u32 index,
	u32 bits_to_clear, u32 bits_to_set)
{
	u32 data;
	int status = ufx_reg_read(dev, index, &data);
	check_warn_return(status, "ufx_reg_clear_and_set_bits error reading "
		"0x%x", index);

	data &= (~bits_to_clear);
	data |= bits_to_set;

	status = ufx_reg_write(dev, index, data);
	check_warn_return(status, "ufx_reg_clear_and_set_bits error writing "
		"0x%x", index);

	return 0;
}

static int ufx_reg_set_bits(struct ufx_data *dev, u32 index, u32 bits)
{
	return ufx_reg_clear_and_set_bits(dev, index, 0, bits);
}

static int ufx_reg_clear_bits(struct ufx_data *dev, u32 index, u32 bits)
{
	return ufx_reg_clear_and_set_bits(dev, index, bits, 0);
}

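/* Soft reset: writing 1 to register 0x3008 requests the reset, and the
 * register is expected to read back as 0 once it has completed - that is
 * the check ufx_lite_reset() performs below (behaviour inferred from the
 * code, not from a datasheet). */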
static int ufx_lite_reset(struct ufx_data *dev)
{
	int status;
	u32 value;

	status = ufx_reg_write(dev, 0x3008, 0x00000001);
	check_warn_return(status, "ufx_lite_reset error writing 0x3008");

	status = ufx_reg_read(dev, 0x3008, &value);
	check_warn_return(status, "ufx_lite_reset error reading 0x3008");

	return (value == 0) ? 0 : -EIO;
}

/* If display is unblanked, then blank it */
static int ufx_blank(struct ufx_data *dev, bool wait)
{
	u32 dc_ctrl, dc_sts;
	int i;

	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
	check_warn_return(status, "ufx_blank error reading 0x2004");

	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
	check_warn_return(status, "ufx_blank error reading 0x2000");

	/* return success if display is already blanked */
	if ((dc_sts & 0x00000100) || (dc_ctrl & 0x00000100))
		return 0;

	/* request the DC to blank the display */
	dc_ctrl |= 0x00000100;
	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
	check_warn_return(status, "ufx_blank error writing 0x2000");

	/* return success immediately if we don't have to wait */
	if (!wait)
		return 0;

	for (i = 0; i < 250; i++) {
		status = ufx_reg_read(dev, 0x2004, &dc_sts);
		check_warn_return(status, "ufx_blank error reading 0x2004");

		if (dc_sts & 0x00000100)
			return 0;
	}

	/* timed out waiting for display to blank */
	return -EIO;
}

/* If display is blanked, then unblank it */
static int ufx_unblank(struct ufx_data *dev, bool wait)
{
	u32 dc_ctrl, dc_sts;
	int i;

	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
	check_warn_return(status, "ufx_unblank error reading 0x2004");

	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
	check_warn_return(status, "ufx_unblank error reading 0x2000");

	/* return success if display is already unblanked */
	if (((dc_sts & 0x00000100) == 0) || ((dc_ctrl & 0x00000100) == 0))
		return 0;

	/* request the DC to unblank the display */
	dc_ctrl &= ~0x00000100;
	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
	check_warn_return(status, "ufx_unblank error writing 0x2000");

	/* return success immediately if we don't have to wait */
	if (!wait)
		return 0;

	for (i = 0; i < 250; i++) {
		status = ufx_reg_read(dev, 0x2004, &dc_sts);
		check_warn_return(status, "ufx_unblank error reading 0x2004");

		if ((dc_sts & 0x00000100) == 0)
			return 0;
	}

	/* timed out waiting for display to unblank */
	return -EIO;
}

/* If display is enabled, then disable it */
static int ufx_disable(struct ufx_data *dev, bool wait)
{
	u32 dc_ctrl, dc_sts;
	int i;

	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
	check_warn_return(status, "ufx_disable error reading 0x2004");

	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
	check_warn_return(status, "ufx_disable error reading 0x2000");

	/* return success if display is already disabled */
	if (((dc_sts & 0x00000001) == 0) || ((dc_ctrl & 0x00000001) == 0))
		return 0;

	/* request the DC to disable the display */
	dc_ctrl &= ~(0x00000001);
	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
	check_warn_return(status, "ufx_disable error writing 0x2000");

	/* return success immediately if we don't have to wait */
	if (!wait)
		return 0;

	for (i = 0; i < 250; i++) {
		status = ufx_reg_read(dev, 0x2004, &dc_sts);
		check_warn_return(status, "ufx_disable error reading 0x2004");

		if ((dc_sts & 0x00000001) == 0)
			return 0;
	}

	/* timed out waiting for display to disable */
	return -EIO;
}

/* If display is disabled, then enable it */
static int ufx_enable(struct ufx_data *dev, bool wait)
{
	u32 dc_ctrl, dc_sts;
	int i;

	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
	check_warn_return(status, "ufx_enable error reading 0x2004");

	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
	check_warn_return(status, "ufx_enable error reading 0x2000");

	/* return success if display is already enabled */
	if ((dc_sts & 0x00000001) || (dc_ctrl & 0x00000001))
		return 0;

	/* request the DC to enable the display */
	dc_ctrl |= 0x00000001;
	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
	check_warn_return(status, "ufx_enable error writing 0x2000");

	/* return success immediately if we don't have to wait */
	if (!wait)
		return 0;

	for (i = 0; i < 250; i++) {
		status = ufx_reg_read(dev, 0x2004, &dc_sts);
		check_warn_return(status, "ufx_enable error reading 0x2004");

		if (dc_sts & 0x00000001)
			return 0;
	}

	/* timed out waiting for display to enable */
	return -EIO;
}

static int ufx_config_sys_clk(struct ufx_data *dev)
{
	int status = ufx_reg_write(dev, 0x700C, 0x8000000F);
	check_warn_return(status, "error writing 0x700C");

	status = ufx_reg_write(dev, 0x7014, 0x0010024F);
	check_warn_return(status, "error writing 0x7014");

	status = ufx_reg_write(dev, 0x7010, 0x00000000);
	check_warn_return(status, "error writing 0x7010");

	status = ufx_reg_clear_bits(dev, 0x700C, 0x0000000A);
	check_warn_return(status, "error clearing PLL1 bypass in 0x700C");
	msleep(1);

	status = ufx_reg_clear_bits(dev, 0x700C, 0x80000000);
	check_warn_return(status, "error clearing output gate in 0x700C");

	return 0;
}

static int ufx_config_ddr2(struct ufx_data *dev)
{
	int status, i = 0;
	u32 tmp;

	status = ufx_reg_write(dev, 0x0004, 0x001F0F77);
	check_warn_return(status, "error writing 0x0004");

	status = ufx_reg_write(dev, 0x0008, 0xFFF00000);
	check_warn_return(status, "error writing 0x0008");

	status = ufx_reg_write(dev, 0x000C, 0x0FFF2222);
	check_warn_return(status, "error writing 0x000C");

	status = ufx_reg_write(dev, 0x0010, 0x00030814);
	check_warn_return(status, "error writing 0x0010");

	status = ufx_reg_write(dev, 0x0014, 0x00500019);
	check_warn_return(status, "error writing 0x0014");

	status = ufx_reg_write(dev, 0x0018, 0x020D0F15);
	check_warn_return(status, "error writing 0x0018");

	status = ufx_reg_write(dev, 0x001C, 0x02532305);
	check_warn_return(status, "error writing 0x001C");

	status = ufx_reg_write(dev, 0x0020, 0x0B030905);
	check_warn_return(status, "error writing 0x0020");

	status = ufx_reg_write(dev, 0x0024, 0x00000827);
	check_warn_return(status, "error writing 0x0024");

	status = ufx_reg_write(dev, 0x0028, 0x00000000);
	check_warn_return(status, "error writing 0x0028");

	status = ufx_reg_write(dev, 0x002C, 0x00000042);
	check_warn_return(status, "error writing 0x002C");

	status = ufx_reg_write(dev, 0x0030, 0x09520000);
	check_warn_return(status, "error writing 0x0030");

	status = ufx_reg_write(dev, 0x0034, 0x02223314);
	check_warn_return(status, "error writing 0x0034");

	status = ufx_reg_write(dev, 0x0038, 0x00430043);
	check_warn_return(status, "error writing 0x0038");

	status = ufx_reg_write(dev, 0x003C, 0xF00F000F);
	check_warn_return(status, "error writing 0x003C");

	status = ufx_reg_write(dev, 0x0040, 0xF380F00F);
	check_warn_return(status, "error writing 0x0040");

	status = ufx_reg_write(dev, 0x0044, 0xF00F0496);
	check_warn_return(status, "error writing 0x0044");

	status = ufx_reg_write(dev, 0x0048, 0x03080406);
	check_warn_return(status, "error writing 0x0048");

	status = ufx_reg_write(dev, 0x004C, 0x00001000);
	check_warn_return(status, "error writing 0x004C");

	status = ufx_reg_write(dev, 0x005C, 0x00000007);
	check_warn_return(status, "error writing 0x005C");

	status = ufx_reg_write(dev, 0x0100, 0x54F00012);
	check_warn_return(status, "error writing 0x0100");

	status = ufx_reg_write(dev, 0x0104, 0x00004012);
	check_warn_return(status, "error writing 0x0104");

	status = ufx_reg_write(dev, 0x0118, 0x40404040);
	check_warn_return(status, "error writing 0x0118");

	status = ufx_reg_write(dev, 0x0000, 0x00000001);
	check_warn_return(status, "error writing 0x0000");

	while (i++ < 500) {
		status = ufx_reg_read(dev, 0x0000, &tmp);
		check_warn_return(status, "error reading 0x0000");

		if (all_bits_set(tmp, 0xC0000000))
			return 0;
	}

	pr_err("DDR2 initialisation timed out, reg 0x0000=0x%08x", tmp);
	return -ETIMEDOUT;
}

struct pll_values {
	u32 div_r0;
	u32 div_f0;
	u32 div_q0;
	u32 range0;
	u32 div_r1;
	u32 div_f1;
	u32 div_q1;
	u32 range1;
};

static u32 ufx_calc_range(u32 ref_freq)
{
	if (ref_freq >= 88000000)
		return 7;

	if (ref_freq >= 54000000)
		return 6;

	if (ref_freq >= 34000000)
		return 5;

	if (ref_freq >= 21000000)
		return 4;

	if (ref_freq >= 13000000)
		return 3;

	if (ref_freq >= 8000000)
		return 2;

	return 1;
}

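/* The pixel clock is generated by two cascaded PLLs: PLL0 runs from the
 * fixed 25 MHz reference and its output feeds PLL1 as a second reference.
 * The nested loops below exhaustively walk both sets of R/F/Q dividers
 * (within the frequency windows they check) and keep the combination with
 * the smallest error against the requested pixel-PLL frequency. */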
/* calculates PLL divider settings for a desired target frequency */
static void ufx_calc_pll_values(const u32 clk_pixel_pll, struct pll_values *asic_pll)
{
	const u32 ref_clk = 25000000;
	u32 div_r0, div_f0, div_q0, div_r1, div_f1, div_q1;
	u32 min_error = clk_pixel_pll;

	for (div_r0 = 1; div_r0 <= 32; div_r0++) {
		u32 ref_freq0 = ref_clk / div_r0;
		if (ref_freq0 < 5000000)
			break;

		if (ref_freq0 > 200000000)
			continue;

		for (div_f0 = 1; div_f0 <= 256; div_f0++) {
			u32 vco_freq0 = ref_freq0 * div_f0;

			if (vco_freq0 < 350000000)
				continue;

			if (vco_freq0 > 700000000)
				break;

			for (div_q0 = 0; div_q0 < 7; div_q0++) {
				u32 pllout_freq0 = vco_freq0 / (1 << div_q0);

				if (pllout_freq0 < 5000000)
					break;

				if (pllout_freq0 > 200000000)
					continue;

				for (div_r1 = 1; div_r1 <= 32; div_r1++) {
					u32 ref_freq1 = pllout_freq0 / div_r1;

					if (ref_freq1 < 5000000)
						break;

					for (div_f1 = 1; div_f1 <= 256; div_f1++) {
						u32 vco_freq1 = ref_freq1 * div_f1;

						if (vco_freq1 < 350000000)
							continue;

						if (vco_freq1 > 700000000)
							break;

						for (div_q1 = 0; div_q1 < 7; div_q1++) {
							u32 pllout_freq1 = vco_freq1 / (1 << div_q1);
							int error = abs(pllout_freq1 - clk_pixel_pll);

							if (pllout_freq1 < 5000000)
								break;

							if (pllout_freq1 > 700000000)
								continue;

							if (error < min_error) {
								min_error = error;

								/* final returned value is equal to calculated value - 1
								 * because a value of 0 = divide by 1 */
								asic_pll->div_r0 = div_r0 - 1;
								asic_pll->div_f0 = div_f0 - 1;
								asic_pll->div_q0 = div_q0;
								asic_pll->div_r1 = div_r1 - 1;
								asic_pll->div_f1 = div_f1 - 1;
								asic_pll->div_q1 = div_q1;

								asic_pll->range0 = ufx_calc_range(ref_freq0);
								asic_pll->range1 = ufx_calc_range(ref_freq1);

								if (min_error == 0)
									return;
							}
						}
					}
				}
			}
		}
	}
}

/* sets analog bit PLL configuration values */
static int ufx_config_pix_clk(struct ufx_data *dev, u32 pixclock)
{
	struct pll_values asic_pll = {0};
	u32 value, clk_pixel, clk_pixel_pll;
	int status;

	/* convert pixclock (in ps) to frequency (in Hz) */
	clk_pixel = PICOS2KHZ(pixclock) * 1000;
	pr_debug("pixclock %d ps = clk_pixel %d Hz", pixclock, clk_pixel);

	/* clk_pixel = 1/2 clk_pixel_pll */
	clk_pixel_pll = clk_pixel * 2;

	ufx_calc_pll_values(clk_pixel_pll, &asic_pll);

	/* Keep BYPASS and RESET signals asserted until configured */
	status = ufx_reg_write(dev, 0x7000, 0x8000000F);
	check_warn_return(status, "error writing 0x7000");

	value = (asic_pll.div_f1 | (asic_pll.div_r1 << 8) |
		(asic_pll.div_q1 << 16) | (asic_pll.range1 << 20));
	status = ufx_reg_write(dev, 0x7008, value);
	check_warn_return(status, "error writing 0x7008");

	value = (asic_pll.div_f0 | (asic_pll.div_r0 << 8) |
		(asic_pll.div_q0 << 16) | (asic_pll.range0 << 20));
	status = ufx_reg_write(dev, 0x7004, value);
	check_warn_return(status, "error writing 0x7004");

	status = ufx_reg_clear_bits(dev, 0x7000, 0x00000005);
	check_warn_return(status,
		"error clearing PLL0 bypass bits in 0x7000");
	msleep(1);

	status = ufx_reg_clear_bits(dev, 0x7000, 0x0000000A);
	check_warn_return(status,
		"error clearing PLL1 bypass bits in 0x7000");
	msleep(1);

	status = ufx_reg_clear_bits(dev, 0x7000, 0x80000000);
	check_warn_return(status, "error clearing gate bits in 0x7000");

	return 0;
}

static int ufx_set_vid_mode(struct ufx_data *dev, struct fb_var_screeninfo *var)
{
	u32 temp;
	u16 h_total, h_active, h_blank_start, h_blank_end, h_sync_start, h_sync_end;
	u16 v_total, v_active, v_blank_start, v_blank_end, v_sync_start, v_sync_end;

	int status = ufx_reg_write(dev, 0x8028, 0);
	check_warn_return(status, "ufx_set_vid_mode error disabling RGB pad");

	status = ufx_reg_write(dev, 0x8024, 0);
	check_warn_return(status, "ufx_set_vid_mode error disabling VDAC");

	/* shut everything down before changing timing */
	status = ufx_blank(dev, true);
	check_warn_return(status, "ufx_set_vid_mode error blanking display");

	status = ufx_disable(dev, true);
	check_warn_return(status, "ufx_set_vid_mode error disabling display");

	status = ufx_config_pix_clk(dev, var->pixclock);
	check_warn_return(status, "ufx_set_vid_mode error configuring pixclock");

	status = ufx_reg_write(dev, 0x2000, 0x00000104);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2000");

	/* set horizontal timings */
	h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
	h_active = var->xres;
	h_blank_start = var->xres + var->right_margin;
	h_blank_end = var->xres + var->right_margin + var->hsync_len;
	h_sync_start = var->xres + var->right_margin;
	h_sync_end = var->xres + var->right_margin + var->hsync_len;

	temp = ((h_total - 1) << 16) | (h_active - 1);
	status = ufx_reg_write(dev, 0x2008, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2008");

	temp = ((h_blank_start - 1) << 16) | (h_blank_end - 1);
	status = ufx_reg_write(dev, 0x200C, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x200C");

	temp = ((h_sync_start - 1) << 16) | (h_sync_end - 1);
	status = ufx_reg_write(dev, 0x2010, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2010");

	/* set vertical timings */
	v_total = var->upper_margin + var->yres + var->lower_margin + var->vsync_len;
	v_active = var->yres;
	v_blank_start = var->yres + var->lower_margin;
	v_blank_end = var->yres + var->lower_margin + var->vsync_len;
	v_sync_start = var->yres + var->lower_margin;
	v_sync_end = var->yres + var->lower_margin + var->vsync_len;

	temp = ((v_total - 1) << 16) | (v_active - 1);
	status = ufx_reg_write(dev, 0x2014, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2014");

	temp = ((v_blank_start - 1) << 16) | (v_blank_end - 1);
	status = ufx_reg_write(dev, 0x2018, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2018");

	temp = ((v_sync_start - 1) << 16) | (v_sync_end - 1);
	status = ufx_reg_write(dev, 0x201C, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x201C");

	status = ufx_reg_write(dev, 0x2020, 0x00000000);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2020");

	status = ufx_reg_write(dev, 0x2024, 0x00000000);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2024");

	/* Set the frame length register (#pix * 2 bytes/pixel) */
	temp = var->xres * var->yres * 2;
	temp = (temp + 7) & (~0x7);
	status = ufx_reg_write(dev, 0x2028, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2028");

	/* enable desired output interface & disable others */
	status = ufx_reg_write(dev, 0x2040, 0);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2040");

	status = ufx_reg_write(dev, 0x2044, 0);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2044");

	status = ufx_reg_write(dev, 0x2048, 0);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2048");

	/* set the sync polarities & enable bit */
	temp = 0x00000001;
	if (var->sync & FB_SYNC_HOR_HIGH_ACT)
		temp |= 0x00000010;

	if (var->sync & FB_SYNC_VERT_HIGH_ACT)
		temp |= 0x00000008;

	status = ufx_reg_write(dev, 0x2040, temp);
	check_warn_return(status, "ufx_set_vid_mode error writing 0x2040");

	/* start everything back up */
	status = ufx_enable(dev, true);
	check_warn_return(status, "ufx_set_vid_mode error enabling display");

	/* Unblank the display */
	status = ufx_unblank(dev, true);
	check_warn_return(status, "ufx_set_vid_mode error unblanking display");

	/* enable RGB pad */
	status = ufx_reg_write(dev, 0x8028, 0x00000003);
	check_warn_return(status, "ufx_set_vid_mode error enabling RGB pad");

	/* enable VDAC */
	status = ufx_reg_write(dev, 0x8024, 0x00000007);
	check_warn_return(status, "ufx_set_vid_mode error enabling VDAC");

	return 0;
}

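/* mmap maps the driver's vmalloc'ed shadow framebuffer (allocated in
 * ufx_realloc_framebuffer() below) into the client's address space one page
 * at a time via vmalloc_to_pfn()/remap_pfn_range(). */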
static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	if (offset + size > info->fix.smem_len)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
		  pos, size);

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	return 0;
}

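/* Each rectangle command built below starts with a 24-byte header: a
 * command word (0x01), a length word, the x/y/width/height of the damaged
 * rectangle, a frame base address, a color-mode/xres word and the yres,
 * all little-endian; packed 16bpp pixel data follows, one line per
 * ALIGN(width * 2, 4) stride. */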
static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
	int width, int height)
{
	size_t packed_line_len = ALIGN((width * 2), 4);
	size_t packed_rect_len = packed_line_len * height;
	int line;

	BUG_ON(!dev);
	BUG_ON(!dev->info);

	/* command word */
	*((u32 *)&cmd[0]) = cpu_to_le32(0x01);

	/* length word */
	*((u32 *)&cmd[2]) = cpu_to_le32(packed_rect_len + 16);

	cmd[4] = cpu_to_le16(x);
	cmd[5] = cpu_to_le16(y);
	cmd[6] = cpu_to_le16(width);
	cmd[7] = cpu_to_le16(height);

	/* frame base address */
	*((u32 *)&cmd[8]) = cpu_to_le32(0);

	/* color mode and horizontal resolution */
	cmd[10] = cpu_to_le16(0x4000 | dev->info->var.xres);

	/* vertical resolution */
	cmd[11] = cpu_to_le16(dev->info->var.yres);

	/* packed data */
	for (line = 0; line < height; line++) {
		const int line_offset = dev->info->fix.line_length * (y + line);
		const int byte_offset = line_offset + (x * BPP);
		memcpy(&cmd[(24 + (packed_line_len * line)) / 2],
			(char *)dev->info->fix.smem_start + byte_offset, width * BPP);
	}
}

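/* A damaged rectangle is sent as a series of bulk URBs: each loop iteration
 * below grabs a free URB, packs as many whole lines as fit in its transfer
 * buffer, and submits it before moving on to the next slice. */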
static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
	int width, int height)
{
	size_t packed_line_len = ALIGN((width * 2), 4);
	int len, status, urb_lines, start_line = 0;

	if ((width <= 0) || (height <= 0) ||
	    (x + width > dev->info->var.xres) ||
	    (y + height > dev->info->var.yres))
		return -EINVAL;

	if (!atomic_read(&dev->usb_active))
		return 0;

	while (start_line < height) {
		struct urb *urb = ufx_get_urb(dev);
		if (!urb) {
			pr_warn("ufx_handle_damage unable to get urb");
			return 0;
		}

		/* assume we have enough space to transfer at least one line */
		BUG_ON(urb->transfer_buffer_length < (24 + (width * 2)));

		/* calculate the maximum number of lines we could fit in */
		urb_lines = (urb->transfer_buffer_length - 24) / packed_line_len;

		/* but we might not need this many */
		urb_lines = min(urb_lines, (height - start_line));

		memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);

		ufx_raw_rect(dev, urb->transfer_buffer, x, (y + start_line), width, urb_lines);
		len = 24 + (packed_line_len * urb_lines);

		status = ufx_submit_urb(dev, urb, len);
		check_warn_return(status, "Error submitting URB");

		start_line += urb_lines;
	}

	return 0;
}

/* Path triggered by usermode clients who write to filesystem
 * e.g. cat filename > /dev/fb1
 * Not used by X Windows or text-mode console. But useful for testing.
 * Slow because of extra copy and we must assume all pixels dirty. */
static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t result;
	struct ufx_data *dev = info->par;
	u32 offset = (u32) *ppos;

	result = fb_sys_write(info, buf, count, ppos);

	if (result > 0) {
		int start = max((int)(offset / info->fix.line_length), 0);
		int lines = min((u32)((result / info->fix.line_length) + 1),
				(u32)info->var.yres);

		ufx_handle_damage(dev, 0, start, info->var.xres, lines);
	}

	return result;
}

static void ufx_ops_copyarea(struct fb_info *info,
				const struct fb_copyarea *area)
{

	struct ufx_data *dev = info->par;

	sys_copyarea(info, area);

	ufx_handle_damage(dev, area->dx, area->dy,
			area->width, area->height);
}

static void ufx_ops_imageblit(struct fb_info *info,
				const struct fb_image *image)
{
	struct ufx_data *dev = info->par;

	sys_imageblit(info, image);

	ufx_handle_damage(dev, image->dx, image->dy,
			image->width, image->height);
}

static void ufx_ops_fillrect(struct fb_info *info,
			     const struct fb_fillrect *rect)
{
	struct ufx_data *dev = info->par;

	sys_fillrect(info, rect);

	ufx_handle_damage(dev, rect->dx, rect->dy, rect->width,
			  rect->height);
}

/* NOTE: fb_defio.c is holding info->fbdefio.mutex
 * Touching ANY framebuffer memory that triggers a page fault
 * in fb_defio will cause a deadlock, when it also tries to
 * grab the same mutex. */
static void ufx_dpy_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct ufx_data *dev = info->par;

	if (!fb_defio)
		return;

	if (!atomic_read(&dev->usb_active))
		return;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* create a rectangle of full screen width that encloses the
		 * entire dirty framebuffer page */
		const int x = 0;
		const int width = dev->info->var.xres;
		const int y = (cur->index << PAGE_SHIFT) / (width * 2);
		int height = (PAGE_SIZE / (width * 2)) + 1;
		height = min(height, (int)(dev->info->var.yres - y));

		BUG_ON(y >= dev->info->var.yres);
		BUG_ON((y + height) > dev->info->var.yres);

		ufx_handle_damage(dev, x, y, width, height);
	}
}

static int ufx_ops_ioctl(struct fb_info *info, unsigned int cmd,
			 unsigned long arg)
{
	struct ufx_data *dev = info->par;
	struct dloarea *area = NULL;

	if (!atomic_read(&dev->usb_active))
		return 0;

	/* TODO: Update X server to get this from sysfs instead */
	if (cmd == UFX_IOCTL_RETURN_EDID) {
		u8 __user *edid = (u8 __user *)arg;
		if (copy_to_user(edid, dev->edid, dev->edid_size))
			return -EFAULT;
		return 0;
	}

	/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
	if (cmd == UFX_IOCTL_REPORT_DAMAGE) {
		/* If we have a damage-aware client, turn fb_defio "off"
		 * to avoid the perf impact of unnecessary page fault handling.
		 * Done by resetting the delay for this fb_info to a very
		 * long period. Pages will become writable and stay that way.
		 * Reset to normal value when all clients have closed this fb.
		 */
		if (info->fbdefio)
			info->fbdefio->delay = UFX_DEFIO_WRITE_DISABLE;

		area = (struct dloarea *)arg;

		if (area->x < 0)
			area->x = 0;

		if (area->x > info->var.xres)
			area->x = info->var.xres;

		if (area->y < 0)
			area->y = 0;

		if (area->y > info->var.yres)
			area->y = info->var.yres;

		ufx_handle_damage(dev, area->x, area->y, area->w, area->h);
	}

	return 0;
}

/* taken from vesafb */
static int
ufx_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
		  unsigned blue, unsigned transp, struct fb_info *info)
{
	int err = 0;

	if (regno >= info->cmap.len)
		return 1;

	if (regno < 16) {
		if (info->var.red.offset == 10) {
			/* 1:5:5:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800) >> 1) |
			    ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
		} else {
			/* 0:5:6:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800)) |
			    ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		}
	}

	return err;
}

/* It's common for several clients to have framebuffer open simultaneously.
 * e.g. both fbcon and X. Makes things interesting.
 * Assumes caller is holding info->lock (for open and release at least) */
static int ufx_ops_open(struct fb_info *info, int user)
{
	struct ufx_data *dev = info->par;

	/* fbcon aggressively connects to first framebuffer it finds,
	 * preventing other clients (X) from working properly. Usually
	 * not what the user wants. Fail by default with option to enable. */
	if (user == 0 && !console)
		return -EBUSY;

	/* If the USB device is gone, we don't accept new opens */
	if (dev->virtualized)
		return -ENODEV;

	dev->fb_count++;

	kref_get(&dev->kref);

	if (fb_defio && (info->fbdefio == NULL)) {
		/* enable defio at last moment if not disabled by client */

		struct fb_deferred_io *fbdefio;

		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

		if (fbdefio) {
			fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
			fbdefio->deferred_io = ufx_dpy_deferred_io;
		}

		info->fbdefio = fbdefio;
		fb_deferred_io_init(info);
	}

	pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d",
		info->node, user, info, dev->fb_count);

	return 0;
}

/*
 * Called when all client interfaces to start transactions have been disabled,
 * and all references to our device instance (ufx_data) are released.
 * Every transaction must have a reference, so we know we are fully spun down
 */
static void ufx_free(struct kref *kref)
{
	struct ufx_data *dev = container_of(kref, struct ufx_data, kref);

	/* this function will wait for all in-flight urbs to complete */
	if (dev->urbs.count > 0)
		ufx_free_urb_list(dev);

	pr_debug("freeing ufx_data %p", dev);

	kfree(dev);
}

static void ufx_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

static void ufx_free_framebuffer_work(struct work_struct *work)
{
	struct ufx_data *dev = container_of(work, struct ufx_data,
					    free_framebuffer_work.work);
	struct fb_info *info = dev->info;
	int node = info->node;

	unregister_framebuffer(info);

	if (info->cmap.len != 0)
		fb_dealloc_cmap(&info->cmap);
	if (info->monspecs.modedb)
		fb_destroy_modedb(info->monspecs.modedb);
	if (info->screen_base)
		vfree(info->screen_base);

	fb_destroy_modelist(&info->modelist);

	dev->info = 0;

	/* Assume info structure is freed after this point */
	framebuffer_release(info);

	pr_debug("fb_info for /dev/fb%d has been freed", node);

	/* ref taken in probe() as part of registering framebuffer */
	kref_put(&dev->kref, ufx_free);
}

/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int ufx_ops_release(struct fb_info *info, int user)
{
	struct ufx_data *dev = info->par;

	dev->fb_count--;

	/* We can't free fb_info here - fbmem will touch it when we return */
	if (dev->virtualized && (dev->fb_count == 0))
		schedule_delayed_work(&dev->free_framebuffer_work, HZ);

	if ((dev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = ufx_ops_mmap;
	}

	pr_debug("released /dev/fb%d user=%d count=%d",
		info->node, user, dev->fb_count);

	kref_put(&dev->kref, ufx_free);

	return 0;
}

/* Check whether a video mode is supported by the chip
 * We start from monitor's modes, so don't need to filter that here */
static int ufx_is_valid_mode(struct fb_videomode *mode,
		struct fb_info *info)
{
	if ((mode->xres * mode->yres) > (2048 * 1152)) {
		pr_debug("%dx%d too many pixels",
			 mode->xres, mode->yres);
		return 0;
	}

	if (mode->pixclock < 5000) {
		pr_debug("%dx%d %dps pixel clock too fast",
			 mode->xres, mode->yres, mode->pixclock);
		return 0;
	}

	pr_debug("%dx%d (pixclk %dps %dMHz) valid mode", mode->xres, mode->yres,
		mode->pixclock, (1000000 / mode->pixclock));
	return 1;
}

static void ufx_var_color_format(struct fb_var_screeninfo *var)
{
	const struct fb_bitfield red = { 11, 5, 0 };
	const struct fb_bitfield green = { 5, 6, 0 };
	const struct fb_bitfield blue = { 0, 5, 0 };

	var->bits_per_pixel = 16;
	var->red = red;
	var->green = green;
	var->blue = blue;
}

static int ufx_ops_check_var(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	struct fb_videomode mode;

	/* TODO: support dynamically changing framebuffer size */
	if ((var->xres * var->yres * 2) > info->fix.smem_len)
		return -EINVAL;

	/* set device-specific elements of var unrelated to mode */
	ufx_var_color_format(var);

	fb_var_to_videomode(&mode, var);

	if (!ufx_is_valid_mode(&mode, info))
		return -EINVAL;

	return 0;
}

static int ufx_ops_set_par(struct fb_info *info)
{
	struct ufx_data *dev = info->par;
	int result;
	u16 *pix_framebuffer;
	int i;

	pr_debug("set_par mode %dx%d", info->var.xres, info->var.yres);
	result = ufx_set_vid_mode(dev, &info->var);

	if ((result == 0) && (dev->fb_count == 0)) {
		/* paint greenscreen */
		pix_framebuffer = (u16 *) info->screen_base;
		for (i = 0; i < info->fix.smem_len / 2; i++)
			pix_framebuffer[i] = 0x37e6;

		ufx_handle_damage(dev, 0, 0, info->var.xres, info->var.yres);
	}

	/* re-enable defio if previously disabled by damage tracking */
	if (info->fbdefio)
		info->fbdefio->delay = UFX_DEFIO_WRITE_DELAY;

	return result;
}

/* In order to come back from full DPMS off, we need to set the mode again */
static int ufx_ops_blank(int blank_mode, struct fb_info *info)
{
	struct ufx_data *dev = info->par;
	ufx_set_vid_mode(dev, &info->var);
	return 0;
}

static struct fb_ops ufx_ops = {
	.owner = THIS_MODULE,
	.fb_read = fb_sys_read,
	.fb_write = ufx_ops_write,
	.fb_setcolreg = ufx_ops_setcolreg,
	.fb_fillrect = ufx_ops_fillrect,
	.fb_copyarea = ufx_ops_copyarea,
	.fb_imageblit = ufx_ops_imageblit,
	.fb_mmap = ufx_ops_mmap,
	.fb_ioctl = ufx_ops_ioctl,
	.fb_open = ufx_ops_open,
	.fb_release = ufx_ops_release,
	.fb_blank = ufx_ops_blank,
	.fb_check_var = ufx_ops_check_var,
	.fb_set_par = ufx_ops_set_par,
};

/* Assumes &info->lock held by caller
 * Assumes no active clients have framebuffer open */
static int ufx_realloc_framebuffer(struct ufx_data *dev, struct fb_info *info)
{
	int retval = -ENOMEM;
	int old_len = info->fix.smem_len;
	int new_len;
	unsigned char *old_fb = info->screen_base;
	unsigned char *new_fb;

	pr_debug("Reallocating framebuffer. Addresses will change!");

	new_len = info->fix.line_length * info->var.yres;

	if (PAGE_ALIGN(new_len) > old_len) {
		/*
		 * Alloc system memory for virtual framebuffer
		 */
		new_fb = vmalloc(new_len);
		if (!new_fb) {
			pr_err("Virtual framebuffer alloc failed");
			goto error;
		}

		if (info->screen_base) {
			memcpy(new_fb, old_fb, old_len);
			vfree(info->screen_base);
		}

		info->screen_base = new_fb;
		info->fix.smem_len = PAGE_ALIGN(new_len);
		info->fix.smem_start = (unsigned long) new_fb;
		info->flags = smscufx_info_flags;
	}

	retval = 0;

error:
	return retval;
}

/* sets up I2C Controller for 100 Kbps, std. speed, 7-bit addr, master,
 * restart enabled, but no start byte, enable controller */
static int ufx_i2c_init(struct ufx_data *dev)
{
	u32 tmp;

	/* disable the controller before it can be reprogrammed */
	int status = ufx_reg_write(dev, 0x106C, 0x00);
	check_warn_return(status, "failed to disable I2C");

	/* Setup the clock count registers
	 * (12+1) = 13 clks @ 2.5 MHz = 5.2 uS */
	status = ufx_reg_write(dev, 0x1018, 12);
	check_warn_return(status, "error writing 0x1018");

	/* (6+8) = 14 clks @ 2.5 MHz = 5.6 uS */
	status = ufx_reg_write(dev, 0x1014, 6);
	check_warn_return(status, "error writing 0x1014");

	status = ufx_reg_read(dev, 0x1000, &tmp);
	check_warn_return(status, "error reading 0x1000");

	/* set speed to std mode */
	tmp &= ~(0x06);
	tmp |= 0x02;

	/* 7-bit (not 10-bit) addressing */
	tmp &= ~(0x10);

	/* enable restart conditions and master mode */
	tmp |= 0x21;

	status = ufx_reg_write(dev, 0x1000, tmp);
	check_warn_return(status, "error writing 0x1000");

	/* Set normal tx using target address 0 */
	status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0xC00, 0x000);
	check_warn_return(status, "error setting TX mode bits in 0x1004");

	/* Enable the controller */
	status = ufx_reg_write(dev, 0x106C, 0x01);
	check_warn_return(status, "failed to enable I2C");

	return 0;
}

/* sets the I2C port mux and target address */
static int ufx_i2c_configure(struct ufx_data *dev)
{
	int status = ufx_reg_write(dev, 0x106C, 0x00);
	check_warn_return(status, "failed to disable I2C");

	status = ufx_reg_write(dev, 0x3010, 0x00000000);
	check_warn_return(status, "failed to write 0x3010");

	/* A0h is std for any EDID, right shifted by one */
	status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0x3FF, (0xA0 >> 1));
	check_warn_return(status, "failed to set TAR bits in 0x1004");

	status = ufx_reg_write(dev, 0x106C, 0x01);
	check_warn_return(status, "failed to enable I2C");

	return 0;
}

/* wait for BUSY to clear, with a timeout of 50ms with 10ms sleeps. if no
 * monitor is connected, there is no error except for timeout */
static int ufx_i2c_wait_busy(struct ufx_data *dev)
{
	u32 tmp;
	int i, status;

	for (i = 0; i < 15; i++) {
		status = ufx_reg_read(dev, 0x1100, &tmp);
		check_warn_return(status, "0x1100 read failed");

		/* if BUSY is clear, check for error */
		if ((tmp & 0x80000000) == 0) {
			if (tmp & 0x20000000) {
				pr_warn("I2C read failed, 0x1100=0x%08x", tmp);
				return -EIO;
			}

			return 0;
		}

		/* perform the first 10 retries without delay */
		if (i >= 10)
			msleep(10);
	}

	pr_warn("I2C access timed out, resetting I2C hardware");
	status = ufx_reg_write(dev, 0x1100, 0x40000000);
	check_warn_return(status, "0x1100 write failed");

	return -ETIMEDOUT;
}

1429 | /* reads a 128-byte EDID block from the currently selected port and TAR */ | |
261e7676 | 1430 | static int ufx_read_edid(struct ufx_data *dev, u8 *edid, int edid_len) |
3c8a63e2 SG |
1431 | { |
1432 | int i, j, status; | |
1433 | u32 *edid_u32 = (u32 *)edid; | |
1434 | ||
1435 | BUG_ON(edid_len != EDID_LENGTH); | |
1436 | ||
1437 | status = ufx_i2c_configure(dev); | |
1438 | if (status < 0) { | |
1439 | pr_err("ufx_i2c_configure failed"); | |
1440 | return status; | |
1441 | } | |
1442 | ||
1443 | memset(edid, 0xff, EDID_LENGTH); | |
1444 | ||
1445 | /* Read the 128-byte EDID as 2 bursts of 64 bytes */ | |
1446 | for (i = 0; i < 2; i++) { | |
1447 | u32 temp = 0x28070000 | (63 << 20) | (((u32)(i * 64)) << 8); | |
1448 | status = ufx_reg_write(dev, 0x1100, temp); | |
1449 | check_warn_return(status, "Failed to write 0x1100"); | |
1450 | ||
1451 | temp |= 0x80000000; | |
1452 | status = ufx_reg_write(dev, 0x1100, temp); | |
1453 | check_warn_return(status, "Failed to write 0x1100"); | |
1454 | ||
1455 | status = ufx_i2c_wait_busy(dev); | |
1456 | check_warn_return(status, "Timeout waiting for I2C BUSY to clear"); | |
1457 | ||
1458 | for (j = 0; j < 16; j++) { | |
1459 | u32 data_reg_addr = 0x1110 + (j * 4); | |
1460 | status = ufx_reg_read(dev, data_reg_addr, edid_u32++); | |
1461 | check_warn_return(status, "Error reading i2c data"); | |
1462 | } | |
1463 | } | |
1464 | ||
1465 | /* all FF's in the first 16 bytes indicates nothing is connected */ | |
1466 | for (i = 0; i < 16; i++) { | |
1467 | if (edid[i] != 0xFF) { | |
ff0c2642 | 1468 | pr_debug("edid data read successfully"); |
3c8a63e2 SG |
1469 | return EDID_LENGTH; |
1470 | } | |
1471 | } | |
1472 | ||
1473 | pr_warn("edid data contains all 0xff"); | |
1474 | return -ETIMEDOUT; | |
1475 | } | |
1476 | ||
1477 | /* 1) use sw default | |
1478 | * 2) Parse into various fb_info structs | |
1479 | * 3) Allocate virtual framebuffer memory to back highest res mode | |
1480 | * | |
1481 | * Parses EDID into three places used by various parts of fbdev: | |
1482 | * fb_var_screeninfo contains the timing of the monitor's preferred mode | |
1483 | * fb_info.monspecs is full parsed EDID info, including monspecs.modedb | |
1484 | * fb_info.modelist is a linked list of all monitor & VESA modes which work | |
1485 | * | |
1486 | * If EDID is not readable/valid, then modelist is all VESA modes, | |
1487 | * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode | |
1488 | * Returns 0 if successful */ | |
1489 | static int ufx_setup_modes(struct ufx_data *dev, struct fb_info *info, | |
1490 | char *default_edid, size_t default_edid_size) | |
1491 | { | |
1492 | const struct fb_videomode *default_vmode = NULL; | |
261e7676 | 1493 | u8 *edid; |
3c8a63e2 SG |
1494 | int i, result = 0, tries = 3; |
1495 | ||
1496 | if (info->dev) /* only use mutex if info has been registered */ | |
1497 | mutex_lock(&info->lock); | |
1498 | ||
1499 | edid = kmalloc(EDID_LENGTH, GFP_KERNEL); | |
1500 | if (!edid) { | |
1501 | result = -ENOMEM; | |
1502 | goto error; | |
1503 | } | |
1504 | ||
1505 | fb_destroy_modelist(&info->modelist); | |
1506 | memset(&info->monspecs, 0, sizeof(info->monspecs)); | |
1507 | ||
1508 | /* Try to (re)read EDID from hardware first | |
1509 | * EDID data may return, but not parse as valid | |
1510 | * Try again a few times, in case of e.g. analog cable noise */ | |
1511 | while (tries--) { | |
1512 | i = ufx_read_edid(dev, edid, EDID_LENGTH); | |
1513 | ||
1514 | if (i >= EDID_LENGTH) | |
1515 | fb_edid_to_monspecs(edid, &info->monspecs); | |
1516 | ||
1517 | if (info->monspecs.modedb_len > 0) { | |
1518 | dev->edid = edid; | |
1519 | dev->edid_size = i; | |
1520 | break; | |
1521 | } | |
1522 | } | |
1523 | ||
1524 | /* If that fails, use a previously returned EDID if available */ | |
1525 | if (info->monspecs.modedb_len == 0) { | |
1526 | pr_err("Unable to get valid EDID from device/display\n"); | |
1527 | ||
1528 | if (dev->edid) { | |
1529 | fb_edid_to_monspecs(dev->edid, &info->monspecs); | |
1530 | if (info->monspecs.modedb_len > 0) | |
1531 | pr_err("Using previously queried EDID\n"); | |
1532 | } | |
1533 | } | |
1534 | ||
1535 | /* If that fails, use the default EDID we were handed */ | |
1536 | if (info->monspecs.modedb_len == 0) { | |
1537 | if (default_edid_size >= EDID_LENGTH) { | |
1538 | fb_edid_to_monspecs(default_edid, &info->monspecs); | |
1539 | if (info->monspecs.modedb_len > 0) { | |
1540 | memcpy(edid, default_edid, default_edid_size); | |
1541 | dev->edid = edid; | |
1542 | dev->edid_size = default_edid_size; | |
1543 | pr_err("Using default/backup EDID\n"); | |
1544 | } | |
1545 | } | |
1546 | } | |
1547 | ||
1548 | /* If we've got modes, let's pick a best default mode */ | |
1549 | if (info->monspecs.modedb_len > 0) { | |
1550 | ||
1551 | for (i = 0; i < info->monspecs.modedb_len; i++) { | |
1552 | if (ufx_is_valid_mode(&info->monspecs.modedb[i], info)) | |
1553 | fb_add_videomode(&info->monspecs.modedb[i], | |
1554 | &info->modelist); | |
1555 | else /* if we've removed top/best mode */ | |
1556 | info->monspecs.misc &= ~FB_MISC_1ST_DETAIL; | |
1557 | } | |
1558 | ||
1559 | default_vmode = fb_find_best_display(&info->monspecs, | |
1560 | &info->modelist); | |
1561 | } | |
1562 | ||
1563 | /* If everything else has failed, fall back to safe default mode */ | |
1564 | if (default_vmode == NULL) { | |
1565 | ||
1566 | struct fb_videomode fb_vmode = {0}; | |
1567 | ||
1568 | /* Add the standard VESA modes to our modelist | |
1569 | * Since we don't have EDID, there may be modes that | |
1570 | * exceed the monitor's capabilities and/or have the wrong aspect ratio, etc. | |
1571 | * But at least the user has a chance to choose | |
1572 | */ | |
1573 | for (i = 0; i < VESA_MODEDB_SIZE; i++) { | |
1574 | if (ufx_is_valid_mode((struct fb_videomode *) | |
1575 | &vesa_modes[i], info)) | |
1576 | fb_add_videomode(&vesa_modes[i], | |
1577 | &info->modelist); | |
1578 | } | |
1579 | ||
1580 | /* default to a resolution safe for projectors | |
1581 | * (since they are the most common case without EDID) | |
1582 | */ | |
1583 | fb_vmode.xres = 800; | |
1584 | fb_vmode.yres = 600; | |
1585 | fb_vmode.refresh = 60; | |
1586 | default_vmode = fb_find_nearest_mode(&fb_vmode, | |
1587 | &info->modelist); | |
1588 | } | |
1589 | ||
1590 | /* If we have a good mode and no active clients */ | |
1591 | if ((default_vmode != NULL) && (dev->fb_count == 0)) { | |
1592 | ||
1593 | fb_videomode_to_var(&info->var, default_vmode); | |
1594 | ufx_var_color_format(&info->var); | |
1595 | ||
1596 | /* with mode size info, we can now alloc our framebuffer */ | |
1597 | memcpy(&info->fix, &ufx_fix, sizeof(ufx_fix)); | |
1598 | info->fix.line_length = info->var.xres * | |
1599 | (info->var.bits_per_pixel / 8); | |
1600 | ||
1601 | result = ufx_realloc_framebuffer(dev, info); | |
1602 | ||
1603 | } else | |
1604 | result = -EINVAL; | |
1605 | ||
1606 | error: | |
1607 | if (edid && (dev->edid != edid)) | |
1608 | kfree(edid); | |
1609 | ||
1610 | if (info->dev) | |
1611 | mutex_unlock(&info->lock); | |
1612 | ||
1613 | return result; | |
1614 | } | |
1615 | ||
1616 | static int ufx_usb_probe(struct usb_interface *interface, | |
1617 | const struct usb_device_id *id) | |
1618 | { | |
1619 | struct usb_device *usbdev; | |
1620 | struct ufx_data *dev; | |
1621 | struct fb_info *info = NULL; | |
1622 | int retval = -ENOMEM; | |
1623 | u32 id_rev, fpga_rev; | |
1624 | ||
1625 | /* usb initialization */ | |
1626 | usbdev = interface_to_usbdev(interface); | |
1627 | BUG_ON(!usbdev); | |
1628 | ||
1629 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | |
1630 | if (dev == NULL) { | |
1631 | dev_err(&usbdev->dev, "ufx_usb_probe: failed alloc of dev struct\n"); | |
1632 | goto error; | |
1633 | } | |
1634 | ||
1635 | /* we need to wait for both usb and fbdev to spin down on disconnect */ | |
1636 | kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */ | |
1637 | kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */ | |
1638 | ||
1639 | dev->udev = usbdev; | |
1640 | dev->gdev = &usbdev->dev; /* our generic struct device * */ | |
1641 | usb_set_intfdata(interface, dev); | |
1642 | ||
1643 | dev_dbg(dev->gdev, "%s %s - serial #%s\n", | |
1644 | usbdev->manufacturer, usbdev->product, usbdev->serial); | |
1645 | dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", | |
1646 | usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, | |
1647 | usbdev->descriptor.bcdDevice, dev); | |
1648 | dev_dbg(dev->gdev, "console enable=%d\n", console); | |
1649 | dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); | |
1650 | ||
1651 | if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { | |
1652 | retval = -ENOMEM; | |
1653 | dev_err(dev->gdev, "ufx_alloc_urb_list failed\n"); | |
1654 | goto error; | |
1655 | } | |
1656 | ||
1657 | /* We don't register a new USB class. Our client interface is fbdev */ | |
1658 | ||
1659 | /* allocates framebuffer driver structure, not framebuffer memory */ | |
1660 | info = framebuffer_alloc(0, &usbdev->dev); | |
1661 | if (!info) { | |
1662 | retval = -ENOMEM; | |
1663 | dev_err(dev->gdev, "framebuffer_alloc failed\n"); | |
1664 | goto error; | |
1665 | } | |
1666 | ||
1667 | dev->info = info; | |
1668 | info->par = dev; | |
1669 | info->pseudo_palette = dev->pseudo_palette; | |
1670 | info->fbops = &ufx_ops; | |
1671 | ||
1672 | retval = fb_alloc_cmap(&info->cmap, 256, 0); | |
1673 | if (retval < 0) { | |
1674 | dev_err(dev->gdev, "fb_alloc_cmap failed %x\n", retval); | |
1675 | goto error; | |
1676 | } | |
1677 | ||
1678 | INIT_DELAYED_WORK(&dev->free_framebuffer_work, | |
1679 | ufx_free_framebuffer_work); | |
1680 | ||
1681 | INIT_LIST_HEAD(&info->modelist); | |
1682 | ||
1683 | retval = ufx_reg_read(dev, 0x3000, &id_rev); | |
1684 | check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); | |
1685 | dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); | |
1686 | ||
1687 | retval = ufx_reg_read(dev, 0x3004, &fpga_rev); | |
1688 | check_warn_goto_error(retval, "error %d reading 0x3004 register from device", retval); | |
1689 | dev_dbg(dev->gdev, "FPGA_REV register value 0x%08x", fpga_rev); | |
1690 | ||
1691 | dev_dbg(dev->gdev, "resetting device"); | |
1692 | retval = ufx_lite_reset(dev); | |
1693 | check_warn_goto_error(retval, "error %d resetting device", retval); | |
1694 | ||
1695 | dev_dbg(dev->gdev, "configuring system clock"); | |
1696 | retval = ufx_config_sys_clk(dev); | |
1697 | check_warn_goto_error(retval, "error %d configuring system clock", retval); | |
1698 | ||
1699 | dev_dbg(dev->gdev, "configuring DDR2 controller"); | |
1700 | retval = ufx_config_ddr2(dev); | |
1701 | check_warn_goto_error(retval, "error %d initialising DDR2 controller", retval); | |
1702 | ||
1703 | dev_dbg(dev->gdev, "configuring I2C controller"); | |
1704 | retval = ufx_i2c_init(dev); | |
1705 | check_warn_goto_error(retval, "error %d initialising I2C controller", retval); | |
1706 | ||
1707 | dev_dbg(dev->gdev, "selecting display mode"); | |
1708 | retval = ufx_setup_modes(dev, info, NULL, 0); | |
1709 | check_warn_goto_error(retval, "unable to find common mode for display and adapter"); | |
1710 | ||
1711 | retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001); | |
1712 | check_warn_goto_error(retval, "error %d enabling graphics engine", retval); | |
1713 | ||
1714 | /* ready to begin using device */ | |
1715 | atomic_set(&dev->usb_active, 1); | |
1716 | ||
1717 | dev_dbg(dev->gdev, "checking var"); | |
1718 | retval = ufx_ops_check_var(&info->var, info); | |
1719 | check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval); | |
1720 | ||
1721 | dev_dbg(dev->gdev, "setting par"); | |
1722 | retval = ufx_ops_set_par(info); | |
1723 | check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval); | |
1724 | ||
1725 | dev_dbg(dev->gdev, "registering framebuffer"); | |
1726 | retval = register_framebuffer(info); | |
1727 | check_warn_goto_error(retval, "error %d register_framebuffer", retval); | |
1728 | ||
1729 | dev_info(dev->gdev, "SMSC UFX USB device /dev/fb%d attached. %dx%d resolution."
1730 | " Using %dK framebuffer memory\n", info->node, | |
1731 | info->var.xres, info->var.yres, info->fix.smem_len >> 10); | |
1732 | ||
1733 | return 0; | |
1734 | ||
1735 | error: | |
1736 | if (dev) { | |
1737 | if (info) { | |
1738 | if (info->cmap.len != 0) | |
1739 | fb_dealloc_cmap(&info->cmap); | |
1740 | if (info->monspecs.modedb) | |
1741 | fb_destroy_modedb(info->monspecs.modedb); | |
1742 | if (info->screen_base) | |
1743 | vfree(info->screen_base); | |
1744 | ||
1745 | fb_destroy_modelist(&info->modelist); | |
1746 | ||
1747 | framebuffer_release(info); | |
1748 | } | |
1749 | ||
1750 | kref_put(&dev->kref, ufx_free); /* ref for framebuffer */ | |
1751 | kref_put(&dev->kref, ufx_free); /* last ref from kref_init */ | |
1752 | ||
1753 | /* dev has been deallocated. Do not dereference */ | |
1754 | } | |
1755 | ||
1756 | return retval; | |
1757 | } | |
1758 | ||
1759 | static void ufx_usb_disconnect(struct usb_interface *interface) | |
1760 | { | |
1761 | struct ufx_data *dev; | |
1762 | struct fb_info *info; | |
1763 | ||
1764 | dev = usb_get_intfdata(interface); | |
1765 | info = dev->info; | |
1766 | ||
1767 | pr_debug("USB disconnect starting\n"); | |
1768 | ||
1769 | /* we virtualize until all fb clients release. Then we free */ | |
1770 | dev->virtualized = true; | |
1771 | ||
1772 | /* When non-active we'll update virtual framebuffer, but no new urbs */ | |
1773 | atomic_set(&dev->usb_active, 0); | |
1774 | ||
1775 | usb_set_intfdata(interface, NULL); | |
1776 | ||
1777 | /* if clients still have us open, will be freed on last close */ | |
1778 | if (dev->fb_count == 0) | |
1779 | schedule_delayed_work(&dev->free_framebuffer_work, 0); | |
1780 | ||
1781 | /* release reference taken by kref_init in probe() */ | |
1782 | kref_put(&dev->kref, ufx_free); | |
1783 | ||
1784 | /* consider ufx_data freed */ | |
1785 | } | |
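Both probe() and disconnect() release their references through ufx_free, the kref release callback defined earlier in the file but not shown in this section. To make the lifecycle concrete, a plausible minimal shape for it (an assumption; the real callback may also tear down the URB list) is:

static void ufx_free(struct kref *kref)
{
	/* Runs once the last kref_put() drops the count to zero;
	 * only then is it safe to free the shared ufx_data. */
	struct ufx_data *dev = container_of(kref, struct ufx_data, kref);

	kfree(dev);
}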
1786 | ||
1787 | static struct usb_driver ufx_driver = { | |
1788 | .name = "smscufx", | |
1789 | .probe = ufx_usb_probe, | |
1790 | .disconnect = ufx_usb_disconnect, | |
1791 | .id_table = id_table, | |
1792 | }; | |
1793 | ||
fe748483 | 1794 | module_usb_driver(ufx_driver); |
3c8a63e2 SG |
1795 | |
1796 | static void ufx_urb_completion(struct urb *urb) | |
1797 | { | |
1798 | struct urb_node *unode = urb->context; | |
1799 | struct ufx_data *dev = unode->dev; | |
1800 | unsigned long flags; | |
1801 | ||
1802 | /* sync/async unlink faults aren't errors */ | |
1803 | if (urb->status) { | |
1804 | if (!(urb->status == -ENOENT || | |
1805 | urb->status == -ECONNRESET || | |
1806 | urb->status == -ESHUTDOWN)) { | |
1807 | pr_err("%s - nonzero write bulk status received: %d\n", | |
1808 | __func__, urb->status); | |
1809 | atomic_set(&dev->lost_pixels, 1); | |
1810 | } | |
1811 | } | |
1812 | ||
1813 | urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */ | |
1814 | ||
1815 | spin_lock_irqsave(&dev->urbs.lock, flags); | |
1816 | list_add_tail(&unode->entry, &dev->urbs.list); | |
1817 | dev->urbs.available++; | |
1818 | spin_unlock_irqrestore(&dev->urbs.lock, flags); | |
1819 | ||
1820 | /* When using fb_defio, we deadlock if up() is called | |
1821 | * while another is waiting. So queue to another process */ | |
1822 | if (fb_defio) | |
1823 | schedule_delayed_work(&unode->release_urb_work, 0); | |
1824 | else | |
1825 | up(&dev->urbs.limit_sem); | |
1826 | } | |
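When fb_defio is enabled, the completion handler defers the semaphore release to ufx_release_urb_work, which is defined earlier in the file. Its likely shape (shown here as an assumption, purely to make the deadlock-avoidance comment above concrete):

static void ufx_release_urb_work(struct work_struct *work)
{
	/* Release the URB-pool semaphore from process context, avoiding
	 * the fb_defio deadlock noted in ufx_urb_completion(). */
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}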
1827 | ||
1828 | static void ufx_free_urb_list(struct ufx_data *dev) | |
1829 | { | |
1830 | int count = dev->urbs.count; | |
1831 | struct list_head *node; | |
1832 | struct urb_node *unode; | |
1833 | struct urb *urb; | |
1834 | int ret; | |
1835 | unsigned long flags; | |
1836 | ||
1837 | pr_debug("Waiting for completes and freeing all render urbs\n"); | |
1838 | ||
1839 | /* keep waiting and freeing, until we've got 'em all */ | |
1840 | while (count--) { | |
1841 | /* Getting interrupted means a leak, but ok at shutdown */ | |
1842 | ret = down_interruptible(&dev->urbs.limit_sem); | |
1843 | if (ret) | |
1844 | break; | |
1845 | ||
1846 | spin_lock_irqsave(&dev->urbs.lock, flags); | |
1847 | ||
1848 | node = dev->urbs.list.next; /* have reserved one with sem */ | |
1849 | list_del_init(node); | |
1850 | ||
1851 | spin_unlock_irqrestore(&dev->urbs.lock, flags); | |
1852 | ||
1853 | unode = list_entry(node, struct urb_node, entry); | |
1854 | urb = unode->urb; | |
1855 | ||
1856 | /* Free each separately allocated piece */ | |
1857 | usb_free_coherent(urb->dev, dev->urbs.size, | |
1858 | urb->transfer_buffer, urb->transfer_dma); | |
1859 | usb_free_urb(urb); | |
1860 | kfree(node); | |
1861 | } | |
1862 | } | |
1863 | ||
1864 | static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size) | |
1865 | { | |
1866 | int i = 0; | |
1867 | struct urb *urb; | |
1868 | struct urb_node *unode; | |
1869 | char *buf; | |
1870 | ||
1871 | spin_lock_init(&dev->urbs.lock); | |
1872 | ||
1873 | dev->urbs.size = size; | |
1874 | INIT_LIST_HEAD(&dev->urbs.list); | |
1875 | ||
1876 | while (i < count) { | |
1877 | unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); | |
1878 | if (!unode) | |
1879 | break; | |
1880 | unode->dev = dev; | |
1881 | ||
1882 | INIT_DELAYED_WORK(&unode->release_urb_work, | |
1883 | ufx_release_urb_work); | |
1884 | ||
1885 | urb = usb_alloc_urb(0, GFP_KERNEL); | |
1886 | if (!urb) { | |
1887 | kfree(unode); | |
1888 | break; | |
1889 | } | |
1890 | unode->urb = urb; | |
1891 | ||
1892 | buf = usb_alloc_coherent(dev->udev, size, GFP_KERNEL, | |
1893 | &urb->transfer_dma); | |
1894 | if (!buf) { | |
1895 | kfree(unode); | |
1896 | usb_free_urb(urb); | |
1897 | break; | |
1898 | } | |
1899 | ||
1900 | /* urb->transfer_buffer_length set to actual before submit */ | |
1901 | usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1), | |
1902 | buf, size, ufx_urb_completion, unode); | |
1903 | urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | |
1904 | ||
1905 | list_add_tail(&unode->entry, &dev->urbs.list); | |
1906 | ||
1907 | i++; | |
1908 | } | |
1909 | ||
1910 | sema_init(&dev->urbs.limit_sem, i); | |
1911 | dev->urbs.count = i; | |
1912 | dev->urbs.available = i; | |
1913 | ||
1914 | pr_debug("allocated %d %d byte urbs\n", i, (int) size); | |
1915 | ||
1916 | return i; | |
1917 | } | |
1918 | ||
1919 | static struct urb *ufx_get_urb(struct ufx_data *dev) | |
1920 | { | |
1921 | int ret = 0; | |
1922 | struct list_head *entry; | |
1923 | struct urb_node *unode; | |
1924 | struct urb *urb = NULL; | |
1925 | unsigned long flags; | |
1926 | ||
1927 | /* Wait for an in-flight buffer to complete and get re-queued */ | |
1928 | ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT); | |
1929 | if (ret) { | |
1930 | atomic_set(&dev->lost_pixels, 1); | |
1931 | pr_warn("wait for urb interrupted: %x available: %d\n", | |
1932 | ret, dev->urbs.available); | |
1933 | goto error; | |
1934 | } | |
1935 | ||
1936 | spin_lock_irqsave(&dev->urbs.lock, flags); | |
1937 | ||
1938 | BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */ | |
1939 | entry = dev->urbs.list.next; | |
1940 | list_del_init(entry); | |
1941 | dev->urbs.available--; | |
1942 | ||
1943 | spin_unlock_irqrestore(&dev->urbs.lock, flags); | |
1944 | ||
1945 | unode = list_entry(entry, struct urb_node, entry); | |
1946 | urb = unode->urb; | |
1947 | ||
1948 | error: | |
1949 | return urb; | |
1950 | } | |
1951 | ||
1952 | static int ufx_submit_urb(struct ufx_data *dev, struct urb *urb, size_t len) | |
1953 | { | |
1954 | int ret; | |
1955 | ||
1956 | BUG_ON(len > dev->urbs.size); | |
1957 | ||
1958 | urb->transfer_buffer_length = len; /* set to actual payload len */ | |
1959 | ret = usb_submit_urb(urb, GFP_KERNEL); | |
1960 | if (ret) { | |
1961 | ufx_urb_completion(urb); /* because no one else will */ | |
1962 | atomic_set(&dev->lost_pixels, 1); | |
1963 | pr_err("usb_submit_urb error %x\n", ret); | |
1964 | } | |
1965 | return ret; | |
1966 | } | |
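Together, ufx_get_urb() and ufx_submit_urb() give the render paths a simple borrow/fill/submit pattern. A condensed sketch of that calling pattern (illustrative only; ufx_send_buffer is hypothetical, and the driver's actual render code elsewhere in the file does more than a plain memcpy):

static int ufx_send_buffer(struct ufx_data *dev, const void *data, size_t len)
{
	/* Borrow a pre-allocated URB; this may block up to GET_URB_TIMEOUT. */
	struct urb *urb = ufx_get_urb(dev);

	if (!urb)
		return -EAGAIN;

	/* Fill the coherent transfer buffer, never exceeding the pool size. */
	len = min_t(size_t, len, dev->urbs.size);
	memcpy(urb->transfer_buffer, data, len);

	/* Submit with the actual payload length; on failure the URB is
	 * returned to the pool by ufx_submit_urb() itself. */
	return ufx_submit_urb(dev, urb, len);
}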
1967 | ||
1968 | module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | |
1969 | MODULE_PARM_DESC(console, "Allow fbcon to be used on this display"); | |
1970 | ||
1971 | module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | |
1972 | MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support"); | |
1973 | ||
90b24cfb | 1974 | MODULE_AUTHOR("Steve Glendinning <[email protected]>"); |
3c8a63e2 SG |
1975 | MODULE_DESCRIPTION("SMSC UFX kernel framebuffer driver"); |
1976 | MODULE_LICENSE("GPL"); |