/*
 * COMEDI - Linux Control and Measurement Device Interface
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
18 #include <linux/vmalloc.h>
19 #include <linux/slab.h>
21 #include "comedidev.h"
22 #include "comedi_internal.h"
/*
 * Page protection used when vmap'ing the preallocated buffer pages:
 * uncached where the architecture provides PAGE_KERNEL_NOCACHE (the
 * buffer may also be filled by DMA), plain kernel mapping otherwise.
 * The visible text was missing the #else/#endif pair.
 */
#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif
30 static void comedi_buf_map_kref_release(struct kref *kref)
32 struct comedi_buf_map *bm =
33 container_of(kref, struct comedi_buf_map, refcount);
34 struct comedi_buf_page *buf;
38 for (i = 0; i < bm->n_pages; i++) {
39 buf = &bm->page_list[i];
40 clear_bit(PG_reserved,
41 &(virt_to_page(buf->virt_addr)->flags));
42 if (bm->dma_dir != DMA_NONE) {
44 dma_free_coherent(bm->dma_hw_dev,
50 free_page((unsigned long)buf->virt_addr);
55 if (bm->dma_dir != DMA_NONE)
56 put_device(bm->dma_hw_dev);
60 static void __comedi_buf_free(struct comedi_device *dev,
61 struct comedi_subdevice *s)
63 struct comedi_async *async = s->async;
65 if (async->prealloc_buf) {
66 vunmap(async->prealloc_buf);
67 async->prealloc_buf = NULL;
68 async->prealloc_bufsz = 0;
71 comedi_buf_map_put(async->buf_map);
72 async->buf_map = NULL;
75 static void __comedi_buf_alloc(struct comedi_device *dev,
76 struct comedi_subdevice *s,
79 struct comedi_async *async = s->async;
80 struct page **pages = NULL;
81 struct comedi_buf_map *bm;
82 struct comedi_buf_page *buf;
85 if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
86 dev_err(dev->class_dev,
87 "dma buffer allocation not supported\n");
91 bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
96 kref_init(&bm->refcount);
97 bm->dma_dir = s->async_dma_dir;
98 if (bm->dma_dir != DMA_NONE)
99 /* Need ref to hardware device to free buffer later. */
100 bm->dma_hw_dev = get_device(dev->hw_dev);
102 bm->page_list = vzalloc(sizeof(*buf) * n_pages);
104 pages = vmalloc(sizeof(struct page *) * n_pages);
109 for (i = 0; i < n_pages; i++) {
110 buf = &bm->page_list[i];
111 if (bm->dma_dir != DMA_NONE)
112 #ifdef CONFIG_HAS_DMA
113 buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
122 buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
126 set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));
128 pages[i] = virt_to_page(buf->virt_addr);
132 /* vmap the prealloc_buf if all the pages were allocated */
134 async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
135 COMEDI_PAGE_PROTECTION);
140 void comedi_buf_map_get(struct comedi_buf_map *bm)
143 kref_get(&bm->refcount);
146 int comedi_buf_map_put(struct comedi_buf_map *bm)
149 return kref_put(&bm->refcount, comedi_buf_map_kref_release);
153 bool comedi_buf_is_mmapped(struct comedi_async *async)
155 struct comedi_buf_map *bm = async->buf_map;
157 return bm && (atomic_read(&bm->refcount.refcount) > 1);
160 int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
161 unsigned long new_size)
163 struct comedi_async *async = s->async;
165 /* Round up new_size to multiple of PAGE_SIZE */
166 new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
168 /* if no change is required, do nothing */
169 if (async->prealloc_buf && async->prealloc_bufsz == new_size)
172 /* deallocate old buffer */
173 __comedi_buf_free(dev, s);
175 /* allocate new buffer */
177 unsigned n_pages = new_size >> PAGE_SHIFT;
179 __comedi_buf_alloc(dev, s, n_pages);
181 if (!async->prealloc_buf) {
182 /* allocation failed */
183 __comedi_buf_free(dev, s);
187 async->prealloc_bufsz = new_size;
192 void comedi_buf_reset(struct comedi_async *async)
194 async->buf_write_alloc_count = 0;
195 async->buf_write_count = 0;
196 async->buf_read_alloc_count = 0;
197 async->buf_read_count = 0;
199 async->buf_write_ptr = 0;
200 async->buf_read_ptr = 0;
203 async->scan_progress = 0;
204 async->munge_chan = 0;
205 async->munge_count = 0;
206 async->munge_ptr = 0;
211 static unsigned int comedi_buf_write_n_available(struct comedi_async *async)
213 unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
215 return free_end - async->buf_write_alloc_count;
218 static unsigned int __comedi_buf_write_alloc(struct comedi_async *async,
222 unsigned int available = comedi_buf_write_n_available(async);
224 if (nbytes > available)
225 nbytes = strict ? 0 : available;
227 async->buf_write_alloc_count += nbytes;
230 * ensure the async buffer 'counts' are read and updated
231 * before we write data to the write-alloc'ed buffer space
/* allocates chunk for the writer from free buffer space */
unsigned int comedi_buf_write_alloc(struct comedi_async *async,
				    unsigned int nbytes)
{
	/* non-strict: a short allocation is trimmed, not refused */
	return __comedi_buf_write_alloc(async, nbytes, 0);
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
247 * munging is applied to data by core as it passes between user
250 static unsigned int comedi_buf_munge(struct comedi_async *async,
251 unsigned int num_bytes)
253 struct comedi_subdevice *s = async->subdevice;
254 unsigned int count = 0;
255 const unsigned num_sample_bytes = bytes_per_sample(s);
257 if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
258 async->munge_count += num_bytes;
261 /* don't munge partial samples */
262 num_bytes -= num_bytes % num_sample_bytes;
263 while (count < num_bytes) {
264 int block_size = num_bytes - count;
265 unsigned int buf_end;
267 buf_end = async->prealloc_bufsz - async->munge_ptr;
268 if (block_size > buf_end)
269 block_size = buf_end;
271 s->munge(s->device, s,
272 async->prealloc_buf + async->munge_ptr,
273 block_size, async->munge_chan);
276 * ensure data is munged in buffer before the
277 * async buffer munge_count is incremented
281 async->munge_chan += block_size / num_sample_bytes;
282 async->munge_chan %= async->cmd.chanlist_len;
283 async->munge_count += block_size;
284 async->munge_ptr += block_size;
285 async->munge_ptr %= async->prealloc_bufsz;
293 unsigned int comedi_buf_write_n_allocated(struct comedi_async *async)
295 return async->buf_write_alloc_count - async->buf_write_count;
298 /* transfers a chunk from writer to filled buffer space */
299 unsigned int comedi_buf_write_free(struct comedi_async *async,
302 unsigned int allocated = comedi_buf_write_n_allocated(async);
304 if (nbytes > allocated)
307 async->buf_write_count += nbytes;
308 async->buf_write_ptr += nbytes;
309 comedi_buf_munge(async, async->buf_write_count - async->munge_count);
310 if (async->buf_write_ptr >= async->prealloc_bufsz)
311 async->buf_write_ptr %= async->prealloc_bufsz;
315 EXPORT_SYMBOL_GPL(comedi_buf_write_free);
317 unsigned int comedi_buf_read_n_available(struct comedi_async *async)
324 num_bytes = async->munge_count - async->buf_read_count;
327 * ensure the async buffer 'counts' are read before we
328 * attempt to read data from the buffer
334 EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
336 /* allocates a chunk for the reader from filled (and munged) buffer space */
337 unsigned int comedi_buf_read_alloc(struct comedi_async *async,
340 unsigned int available;
342 available = async->munge_count - async->buf_read_alloc_count;
343 if (nbytes > available)
346 async->buf_read_alloc_count += nbytes;
349 * ensure the async buffer 'counts' are read before we
350 * attempt to read data from the read-alloc'ed buffer space
356 EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
358 static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
360 return async->buf_read_alloc_count - async->buf_read_count;
363 /* transfers control of a chunk from reader to free buffer space */
364 unsigned int comedi_buf_read_free(struct comedi_async *async,
367 unsigned int allocated;
370 * ensure data has been read out of buffer before
371 * the async read count is incremented
375 allocated = comedi_buf_read_n_allocated(async);
376 if (nbytes > allocated)
379 async->buf_read_count += nbytes;
380 async->buf_read_ptr += nbytes;
381 async->buf_read_ptr %= async->prealloc_bufsz;
384 EXPORT_SYMBOL_GPL(comedi_buf_read_free);
386 int comedi_buf_put(struct comedi_async *async, unsigned short x)
388 unsigned int n = __comedi_buf_write_alloc(async, sizeof(short), 1);
390 if (n < sizeof(short)) {
391 async->events |= COMEDI_CB_ERROR;
394 *(unsigned short *)(async->prealloc_buf + async->buf_write_ptr) = x;
395 comedi_buf_write_free(async, sizeof(short));
398 EXPORT_SYMBOL_GPL(comedi_buf_put);
400 int comedi_buf_get(struct comedi_async *async, unsigned short *x)
402 unsigned int n = comedi_buf_read_n_available(async);
404 if (n < sizeof(short))
406 comedi_buf_read_alloc(async, sizeof(short));
407 *x = *(unsigned short *)(async->prealloc_buf + async->buf_read_ptr);
408 comedi_buf_read_free(async, sizeof(short));
411 EXPORT_SYMBOL_GPL(comedi_buf_get);
413 void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
414 const void *data, unsigned int num_bytes)
416 unsigned int write_ptr = async->buf_write_ptr + offset;
418 if (write_ptr >= async->prealloc_bufsz)
419 write_ptr %= async->prealloc_bufsz;
422 unsigned int block_size;
424 if (write_ptr + num_bytes > async->prealloc_bufsz)
425 block_size = async->prealloc_bufsz - write_ptr;
427 block_size = num_bytes;
429 memcpy(async->prealloc_buf + write_ptr, data, block_size);
432 num_bytes -= block_size;
437 EXPORT_SYMBOL_GPL(comedi_buf_memcpy_to);
439 void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
440 void *dest, unsigned int nbytes)
443 unsigned int read_ptr = async->buf_read_ptr + offset;
445 if (read_ptr >= async->prealloc_bufsz)
446 read_ptr %= async->prealloc_bufsz;
449 unsigned int block_size;
451 src = async->prealloc_buf + read_ptr;
453 if (nbytes >= async->prealloc_bufsz - read_ptr)
454 block_size = async->prealloc_bufsz - read_ptr;
458 memcpy(dest, src, block_size);
459 nbytes -= block_size;
464 EXPORT_SYMBOL_GPL(comedi_buf_memcpy_from);