/*
 * memrar_handler 1.0: An Intel restricted access region handler device
 *
 * Copyright (C) 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 * The full GNU General Public License is included in this
 * distribution in the file called COPYING.
 *
 * -------------------------------------------------------------------
 *
 * Moorestown restricted access regions (RAR) provide isolated
 * areas of main memory that are only accessible by authorized
 * devices.
 *
 * The Intel Moorestown RAR handler module exposes a kernel space
 * RAR memory management mechanism. It is essentially a
 * RAR-specific allocator.
 *
 * Besides providing RAR buffer management, the RAR handler also
 * behaves in many ways like an OS virtual memory manager. For
 * example, the RAR "handles" created by the RAR handler are
 * analogous to user space virtual addresses.
 *
 * RAR memory itself is never accessed directly by the RAR
 * handler.
 */
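
/*
 * Illustrative sketch only, not part of the driver: a kernel-space
 * client could use the rar_reserve()/rar_release() API exported
 * below roughly as follows (the RAR type and the 4096-byte size are
 * arbitrary example values):
 *
 *	struct RAR_buffer buf;
 *
 *	buf.info.type = RAR_TYPE_VIDEO;
 *	buf.info.size = 4096;
 *	if (rar_reserve(&buf, 1) != 1)
 *		return -ENOMEM;
 *
 *	... program buf.bus_address into an authorized device ...
 *
 *	rar_release(&buf, 1);
 */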

#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/rar_register.h>

#include "memrar.h"
#include "memrar_allocator.h"

#define MEMRAR_VER "1.0"

/*
 * Moorestown supports three restricted access regions.
 *
 * We only care about the first two, video and audio. The third,
 * reserved for Chaabi and the P-unit, will be handled by their
 * respective drivers.
 */
#define MRST_NUM_RAR 2

/* ---------------- -------------------- ------------------- */

/**
 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
 * @list:      Linked list of memrar_buffer_info objects.
 * @buffer:    Core RAR buffer information.
 * @refcount:  Reference count.
 * @owner:     File handle corresponding to process that reserved the
 *             block of memory in RAR. This will be zero for buffers
 *             allocated by other drivers instead of by a user space
 *             process.
 *
 * This structure encapsulates a linked list of RAR buffers, as well as
 * other characteristics specific to a given list node, such as the
 * reference count on the corresponding RAR buffer.
 */
struct memrar_buffer_info {
        struct list_head list;
        struct RAR_buffer buffer;
        struct kref refcount;
        struct file *owner;
};

/**
 * struct memrar_rar_info - characteristics of a given RAR
 * @base:      Base bus address of the RAR.
 * @length:    Length of the RAR.
 * @iobase:    Virtual address of RAR mapped into kernel.
 * @allocator: Allocator associated with the RAR. Note the allocator
 *             "capacity" may be smaller than the RAR length if the
 *             length is not a multiple of the configured allocator
 *             block size.
 * @buffers:   Table that keeps track of all reserved RAR buffers.
 * @lock:      Lock used to synchronize access to RAR-specific data
 *             structures.
 *
 * Each RAR has an associated memrar_rar_info structure that describes
 * where in memory the RAR is located, how large it is, and a list of
 * reserved RAR buffers inside that RAR. Each RAR also has a mutex
 * associated with it to reduce lock contention when operations on
 * multiple RARs are performed in parallel.
 */
struct memrar_rar_info {
        dma_addr_t base;
        unsigned long length;
        void __iomem *iobase;
        struct memrar_allocator *allocator;
        struct memrar_buffer_info buffers;
        struct mutex lock;
        int allocated;          /* True if we own this RAR */
};

/*
 * Array of RAR characteristics.
 */
static struct memrar_rar_info memrars[MRST_NUM_RAR];

/* ---------------- -------------------- ------------------- */

/* Validate RAR type. */
static inline int memrar_is_valid_rar_type(u32 type)
{
        return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
}

/* Check if an address/handle falls within the given RAR memory range. */
static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
                                         u32 vaddr)
{
        unsigned long const iobase = (unsigned long) (rar->iobase);
        return (vaddr >= iobase && vaddr < iobase + rar->length);
}

/* Retrieve RAR information associated with the given handle. */
static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
{
        int i;
        for (i = 0; i < MRST_NUM_RAR; ++i) {
                struct memrar_rar_info * const rar = &memrars[i];
                if (memrar_handle_in_range(rar, vaddr))
                        return rar;
        }

        return NULL;
}

/**
 * memrar_get_bus_address - handle to bus address
 *
 * Retrieve bus address from given handle.
 *
 * Returns address corresponding to given handle. Zero if handle is
 * invalid.
 */
static dma_addr_t memrar_get_bus_address(
        struct memrar_rar_info *rar,
        u32 vaddr)
{
        unsigned long const iobase = (unsigned long) (rar->iobase);

        if (!memrar_handle_in_range(rar, vaddr))
                return 0;

        /*
         * An assumption is made that the virtual address offset is
         * the same as the bus address offset, at least based on the
         * way this driver is implemented. For example, vaddr + 2 ==
         * baddr + 2.
         *
         * @todo Is that a valid assumption?
         */
        return rar->base + (vaddr - iobase);
}

/**
 * memrar_get_physical_address - handle to physical address
 *
 * Retrieve physical address from given handle.
 *
 * Returns address corresponding to given handle. Zero if handle is
 * invalid.
 */
static dma_addr_t memrar_get_physical_address(
        struct memrar_rar_info *rar,
        u32 vaddr)
{
        /*
         * @todo This assumes that the bus address and physical
         *       address are the same. That is true for Moorestown
         *       but not necessarily on other platforms. This
         *       deficiency should be addressed at some point.
         */
        return memrar_get_bus_address(rar, vaddr);
}

/**
 * memrar_release_block_i - release a block to the pool
 * @ref: kref of block
 *
 * Core block release code. A node has hit zero references so can
 * be released and the lists must be updated.
 *
 * Note: This code removes the node from a list. Make sure any list
 * iteration is performed using list_for_each_safe().
 */
static void memrar_release_block_i(struct kref *ref)
{
        /*
         * Last reference is being released. Remove from the table,
         * and reclaim resources.
         */

        struct memrar_buffer_info * const node =
                container_of(ref, struct memrar_buffer_info, refcount);

        struct RAR_block_info * const user_info =
                &node->buffer.info;

        struct memrar_allocator * const allocator =
                memrars[user_info->type].allocator;

        list_del(&node->list);

        memrar_allocator_free(allocator, user_info->handle);

        kfree(node);
}

/**
 * memrar_init_rar_resources - configure a RAR
 * @rarnum: rar that has been allocated
 * @devname: name of our device
 *
 * Initialize RAR parameters, such as bus addresses, etc and make
 * the resource accessible.
 */
static int memrar_init_rar_resources(int rarnum, char const *devname)
{
        /* ---- Sanity Checks ----
         * 1. RAR bus addresses in both Lincroft and Langwell RAR
         *    registers should be the same.
         *    a. There's no way we can do this through IA.
         *
         * 2. Secure device ID in Langwell RAR registers should be set
         *    appropriately, e.g. only LPE DMA for the audio RAR, and
         *    security for the other Langwell based RAR registers.
         *    a. There's no way we can do this through IA.
         *
         * 3. Audio and video RAR registers and RAR access should be
         *    locked down. If not, enable RAR access control. Except
         *    for debugging purposes, there is no reason for them to
         *    be unlocked.
         *    a. We can only do this for the Lincroft (IA) side.
         *
         * @todo Should the RAR handler driver even be aware of audio
         *       and video RAR settings?
         */

        /*
         * RAR buffer block size.
         *
         * We choose it to be the size of a page to simplify the
         * /dev/memrar mmap() implementation and usage. Otherwise
         * paging is not involved once a RAR is locked down.
         */
        static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;

        dma_addr_t low, high;
        struct memrar_rar_info * const rar = &memrars[rarnum];

        BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
        BUG_ON(!memrar_is_valid_rar_type(rarnum));
        BUG_ON(rar->allocated);

        if (rar_get_address(rarnum, &low, &high) != 0)
                /* No RAR is available. */
                return -ENODEV;

        if (low == 0 || high == 0) {
                rar->base = 0;
                rar->length = 0;
                rar->iobase = NULL;
                rar->allocator = NULL;
                return -ENOSPC;
        }

        /*
         * @todo Verify that LNC and LNW RAR register contents
         *       (addresses, security, etc) are compatible and
         *       consistent.
         */

        rar->length = high - low + 1;

        /* Claim RAR memory as our own. */
        if (request_mem_region(low, rar->length, devname) == NULL) {
                rar->length = 0;
                pr_err("%s: Unable to claim RAR[%d] memory.\n",
                       devname, rarnum);
                pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);
                return -EBUSY;
        }

        rar->base = low;

        /*
         * Now map it into the kernel address space.
         *
         * Note that the RAR memory may only be accessed by IA
         * when debugging. Otherwise attempts to access the
         * RAR memory when it is locked down will result in
         * behavior similar to writing to /dev/null and
         * reading from /dev/zero. This behavior is enforced
         * by the hardware. Even if we don't access the
         * memory, mapping it into the kernel provides us with
         * a convenient RAR handle to bus address mapping.
         */
        rar->iobase = ioremap_nocache(rar->base, rar->length);
        if (rar->iobase == NULL) {
                pr_err("%s: Unable to map RAR memory.\n", devname);
                release_mem_region(low, rar->length);
                return -ENOMEM;
        }

        /* Initialize corresponding memory allocator. */
        rar->allocator = memrar_create_allocator((unsigned long) rar->iobase,
                                                 rar->length, RAR_BLOCK_SIZE);
        if (rar->allocator == NULL) {
                iounmap(rar->iobase);
                release_mem_region(low, rar->length);
                return -ENOMEM;
        }

        pr_info("%s: RAR[%d] bus address range = [0x%lx, 0x%lx]\n",
                devname, rarnum, (unsigned long) low, (unsigned long) high);

        pr_info("%s: RAR[%d] size = %zu KiB\n",
                devname, rarnum, rar->allocator->capacity / 1024);

        rar->allocated = 1;
        return 0;
}

/**
 * memrar_fini_rar_resources - free up RAR resources
 *
 * Finalize RAR resources. Free up the resource tables, hand the memory
 * back to the kernel, unmap the device and release the address space.
 */
static void memrar_fini_rar_resources(void)
{
        int z;
        struct memrar_buffer_info *pos;
        struct memrar_buffer_info *tmp;

        /*
         * @todo Do we need to hold a lock at this point in time?
         *       (module initialization failure or exit?)
         */

        for (z = MRST_NUM_RAR; z-- != 0; ) {
                struct memrar_rar_info * const rar = &memrars[z];

                if (!rar->allocated)
                        continue;

                /* Clean up remaining resources. */

                list_for_each_entry_safe(pos,
                                         tmp,
                                         &rar->buffers.list,
                                         list) {
                        kref_put(&pos->refcount, memrar_release_block_i);
                }

                memrar_destroy_allocator(rar->allocator);
                rar->allocator = NULL;

                iounmap(rar->iobase);
                release_mem_region(rar->base, rar->length);

                rar->iobase = NULL;
                rar->base = 0;
                rar->length = 0;

                unregister_rar(z);
        }
}

/**
 * memrar_reserve_block - handle an allocation request
 * @request: block being requested
 * @filp: owner it is tied to
 *
 * Allocate a block of the requested RAR. If successful, return zero
 * with the request object filled in; otherwise return an error code.
 */

static long memrar_reserve_block(struct RAR_buffer *request,
                                 struct file *filp)
{
        struct RAR_block_info * const rinfo = &request->info;
        struct RAR_buffer *buffer;
        struct memrar_buffer_info *buffer_info;
        u32 handle;
        struct memrar_rar_info *rar = NULL;

        /* Prevent array overflow. */
        if (!memrar_is_valid_rar_type(rinfo->type))
                return -EINVAL;

        rar = &memrars[rinfo->type];
        if (!rar->allocated)
                return -ENODEV;

        /* Reserve memory in RAR. */
        handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
        if (handle == 0)
                return -ENOMEM;

        buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);

        if (buffer_info == NULL) {
                memrar_allocator_free(rar->allocator, handle);
                return -ENOMEM;
        }

        buffer = &buffer_info->buffer;
        buffer->info.type = rinfo->type;
        buffer->info.size = rinfo->size;

        /* Memory handle corresponding to the bus address. */
        buffer->info.handle = handle;
        buffer->bus_address = memrar_get_bus_address(rar, handle);

        /*
         * Keep track of owner so that we can later clean up if
         * necessary.
         */
        buffer_info->owner = filp;

        kref_init(&buffer_info->refcount);

        mutex_lock(&rar->lock);
        list_add(&buffer_info->list, &rar->buffers.list);
        mutex_unlock(&rar->lock);

        rinfo->handle = buffer->info.handle;
        request->bus_address = buffer->bus_address;

        return 0;
}

/**
 * memrar_release_block - release a RAR block
 * @addr: address in RAR space
 *
 * Release a previously allocated block. Releases act on complete
 * blocks; partially freeing a block is not supported.
 */

static long memrar_release_block(u32 addr)
{
        struct memrar_buffer_info *pos;
        struct memrar_buffer_info *tmp;
        struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
        long result = -EINVAL;

        if (rar == NULL)
                return -ENOENT;

        mutex_lock(&rar->lock);

        /*
         * Iterate through the buffer list to find the corresponding
         * buffer to be released.
         */
        list_for_each_entry_safe(pos,
                                 tmp,
                                 &rar->buffers.list,
                                 list) {
                struct RAR_block_info * const info =
                        &pos->buffer.info;

                /*
                 * Take into account handle offsets that may have been
                 * added to the base handle, such as in the following
                 * scenario:
                 *
                 *      u32 handle = base + offset;
                 *      rar_handle_to_bus(handle);
                 *      rar_release(handle);
                 */
                if (addr >= info->handle
                    && addr < (info->handle + info->size)
                    && memrar_is_valid_rar_type(info->type)) {
                        kref_put(&pos->refcount, memrar_release_block_i);
                        result = 0;
                        break;
                }
        }

        mutex_unlock(&rar->lock);

        return result;
}

/**
 * memrar_get_stat - read statistics for a RAR
 * @r: statistics to be filled in
 *
 * Returns the statistics data for the RAR, or an error code if
 * the request cannot be completed.
 */
static long memrar_get_stat(struct RAR_stat *r)
{
        struct memrar_allocator *allocator;

        if (!memrar_is_valid_rar_type(r->type))
                return -EINVAL;

        if (!memrars[r->type].allocated)
                return -ENODEV;

        allocator = memrars[r->type].allocator;

        BUG_ON(allocator == NULL);

        /*
         * Allocator capacity doesn't change over time. No
         * need to synchronize.
         */
        r->capacity = allocator->capacity;

        mutex_lock(&allocator->lock);
        r->largest_block_size = allocator->largest_free_area;
        mutex_unlock(&allocator->lock);
        return 0;
}

/**
 * memrar_ioctl - ioctl callback
 * @filp: file issuing the request
 * @cmd: command
 * @arg: pointer to control information
 *
 * Perform one of the ioctls supported by the memrar device.
 */

static long memrar_ioctl(struct file *filp,
                         unsigned int cmd,
                         unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long result = 0;

        struct RAR_buffer buffer;
        struct RAR_block_info * const request = &buffer.info;
        struct RAR_stat rar_info;
        u32 rar_handle;

        switch (cmd) {
        case RAR_HANDLER_RESERVE:
                if (copy_from_user(request,
                                   argp,
                                   sizeof(*request)))
                        return -EFAULT;

                result = memrar_reserve_block(&buffer, filp);
                if (result != 0)
                        return result;

                return copy_to_user(argp,
                                    request,
                                    sizeof(*request)) ? -EFAULT : 0;

        case RAR_HANDLER_RELEASE:
                if (copy_from_user(&rar_handle,
                                   argp,
                                   sizeof(rar_handle)))
                        return -EFAULT;

                return memrar_release_block(rar_handle);

        case RAR_HANDLER_STAT:
                if (copy_from_user(&rar_info,
                                   argp,
                                   sizeof(rar_info)))
                        return -EFAULT;

                /*
                 * Populate the RAR_stat structure based on the RAR
                 * type given by the user.
                 */
                if (memrar_get_stat(&rar_info) != 0)
                        return -EINVAL;

                /*
                 * @todo Do we need to verify destination pointer
                 *       "argp" is non-zero? Is that already done by
                 *       copy_to_user()?
                 */
                return copy_to_user(argp,
                                    &rar_info,
                                    sizeof(rar_info)) ? -EFAULT : 0;

        default:
                return -ENOTTY;
        }

        return 0;
}
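
/*
 * Illustrative sketch only: from user space the ioctls above could be
 * driven roughly as follows (this assumes the RAR_HANDLER_* ioctl
 * numbers and the structures in memrar.h are visible to user space;
 * the type and size are arbitrary example values):
 *
 *	int fd = open("/dev/memrar", O_RDWR);
 *	struct RAR_block_info info;
 *
 *	info.type = RAR_TYPE_AUDIO;
 *	info.size = 8192;
 *
 *	if (fd >= 0 && ioctl(fd, RAR_HANDLER_RESERVE, &info) == 0) {
 *		... info.handle now identifies the reserved block ...
 *		ioctl(fd, RAR_HANDLER_RELEASE, &info.handle);
 *	}
 */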

/**
 * memrar_mmap - mmap helper for debugging
 * @filp: handle doing the mapping
 * @vma: memory area
 *
 * Support the mmap operation on the RAR space for debugging systems
 * when the memory is not locked down.
 */

static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
{
        /*
         * This mmap() implementation is predominantly useful for
         * debugging since the CPU will be prevented from accessing
         * RAR memory by the hardware when RAR is properly locked
         * down.
         *
         * In order for this implementation to be useful RAR memory
         * must not be locked down. However, we only want to do
         * that when debugging. DO NOT leave RAR memory unlocked in a
         * deployed device that utilizes RAR.
         */

        size_t const size = vma->vm_end - vma->vm_start;

        /* Users pass the RAR handle as the mmap() offset parameter. */
        unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;

        struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
        unsigned long pfn;

        /* Only allow privileged apps to go poking around this way. */
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        /*
         * Invalid RAR handle or size passed to mmap(). The mapping
         * may not extend beyond the end of the RAR, i.e. beyond what
         * remains after the handle's offset into the RAR.
         */
        if (rar == NULL
            || handle == 0
            || size > rar->length - (handle - (unsigned long) rar->iobase))
                return -EINVAL;

        /*
         * Retrieve physical address corresponding to the RAR handle,
         * and convert it to a page frame.
         */
        pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;

        pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
                 handle,
                 handle + size);

        /*
         * Map RAR memory into user space. This is really only useful
         * for debugging purposes since the memory won't be
         * accessible, i.e. reads return zero and writes are ignored,
         * when RAR access control is enabled.
         */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            pfn,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;

        /* vma->vm_ops = &memrar_mem_ops; */

        return 0;
}
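
/*
 * Illustrative sketch only: on a debug configuration with RAR left
 * unlocked, user space could map a reserved block by passing the RAR
 * handle as the mmap() offset (fd and info are as in the ioctl
 * example above; the handle must be page aligned for the offset to
 * round-trip exactly):
 *
 *	void *p = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, info.handle);
 */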

/**
 * memrar_open - device open method
 * @inode: inode to open
 * @filp: file handle
 *
 * As we support multiple arbitrary opens there is no work to be done
 * really.
 */

static int memrar_open(struct inode *inode, struct file *filp)
{
        return nonseekable_open(inode, filp);
}

/**
 * memrar_release - close method for miscdev
 * @inode: inode of device
 * @filp: handle that is going away
 *
 * Free up all the regions that belong to this file handle. We use
 * the handle as a natural Linux style 'lifetime' indicator and to
 * ensure resources are not leaked when their owner explodes in an
 * unplanned fashion.
 */

static int memrar_release(struct inode *inode, struct file *filp)
{
        /* Free all regions associated with the given file handle. */

        struct memrar_buffer_info *pos;
        struct memrar_buffer_info *tmp;
        int z;

        for (z = 0; z != MRST_NUM_RAR; ++z) {
                struct memrar_rar_info * const rar = &memrars[z];

                mutex_lock(&rar->lock);

                list_for_each_entry_safe(pos,
                                         tmp,
                                         &rar->buffers.list,
                                         list) {
                        if (filp == pos->owner)
                                kref_put(&pos->refcount,
                                         memrar_release_block_i);
                }

                mutex_unlock(&rar->lock);
        }

        return 0;
}

/**
 * rar_reserve - reserve RAR memory
 * @buffers: buffers to reserve
 * @count: number wanted
 *
 * Reserve a series of buffers in the RAR space. Returns the number of
 * buffers successfully allocated.
 */

size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
{
        struct RAR_buffer * const end =
                (buffers == NULL ? buffers : buffers + count);
        struct RAR_buffer *i;

        size_t reserve_count = 0;

        for (i = buffers; i != end; ++i) {
                if (memrar_reserve_block(i, NULL) == 0)
                        ++reserve_count;
                else
                        i->bus_address = 0;
        }

        return reserve_count;
}
EXPORT_SYMBOL(rar_reserve);
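
/*
 * Illustrative sketch only: a caller can reserve several buffers in
 * one call and detect partial failure from the return value; failed
 * entries come back with a zero bus_address (the types and sizes are
 * arbitrary example values):
 *
 *	struct RAR_buffer bufs[2];
 *
 *	bufs[0].info.type = RAR_TYPE_VIDEO;
 *	bufs[0].info.size = PAGE_SIZE;
 *	bufs[1].info.type = RAR_TYPE_AUDIO;
 *	bufs[1].info.size = PAGE_SIZE;
 *
 *	if (rar_reserve(bufs, 2) != 2) {
 *		... at least one entry has bus_address == 0 ...
 *	}
 */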

/**
 * rar_release - return RAR buffers
 * @buffers: buffers to release
 * @count: number of buffers to release
 *
 * Return a set of buffers to the RAR pool.
 */

size_t rar_release(struct RAR_buffer *buffers, size_t count)
{
        struct RAR_buffer * const end =
                (buffers == NULL ? buffers : buffers + count);
        struct RAR_buffer *i;

        size_t release_count = 0;

        for (i = buffers; i != end; ++i) {
                u32 * const handle = &i->info.handle;
                if (memrar_release_block(*handle) == 0) {
                        /*
                         * @todo We assume we should do this each time
                         *       the ref count is decremented. Should
                         *       we instead only do this when the ref
                         *       count has dropped to zero, and the
                         *       buffer has been completely
                         *       released/unmapped?
                         */
                        *handle = 0;
                        ++release_count;
                }
        }

        return release_count;
}
EXPORT_SYMBOL(rar_release);

/**
 * rar_handle_to_bus - RAR to bus address
 * @buffers: RAR buffer structure
 * @count: number of buffers to convert
 *
 * Turn a list of RAR handle mappings into actual bus addresses. Note
 * that when the device is locked down the bus addresses in question
 * are not CPU accessible.
 */

size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
{
        struct RAR_buffer * const end =
                (buffers == NULL ? buffers : buffers + count);
        struct RAR_buffer *i;
        struct memrar_buffer_info *pos;

        size_t conversion_count = 0;

        /*
         * Find all bus addresses corresponding to the given handles.
         *
         * @todo Not liking this nested loop. Optimize.
         */
        for (i = buffers; i != end; ++i) {
                struct memrar_rar_info * const rar =
                        memrar_get_rar_info(i->info.handle);

                /*
                 * Check if we have a bogus handle, and then continue
                 * with remaining buffers.
                 */
                if (rar == NULL) {
                        i->bus_address = 0;
                        continue;
                }

                mutex_lock(&rar->lock);

                list_for_each_entry(pos, &rar->buffers.list, list) {
                        struct RAR_block_info * const user_info =
                                &pos->buffer.info;

                        /*
                         * Take into account handle offsets that may
                         * have been added to the base handle, such as
                         * in the following scenario:
                         *
                         *      u32 handle = base + offset;
                         *      rar_handle_to_bus(handle);
                         */

                        if (i->info.handle >= user_info->handle
                            && i->info.handle < (user_info->handle
                                                 + user_info->size)) {
                                u32 const offset =
                                        i->info.handle - user_info->handle;

                                i->info.type = user_info->type;
                                i->info.size = user_info->size - offset;
                                i->bus_address =
                                        pos->buffer.bus_address
                                        + offset;

                                /* Increment the reference count. */
                                kref_get(&pos->refcount);

                                ++conversion_count;
                                break;
                        } else {
                                i->bus_address = 0;
                        }
                }

                mutex_unlock(&rar->lock);
        }

        return conversion_count;
}
EXPORT_SYMBOL(rar_handle_to_bus);
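
/*
 * Illustrative sketch only: a handle with an offset added to it can be
 * converted to a bus address; the conversion takes an extra reference
 * that a later rar_release() on the same handle drops (buf is a
 * previously reserved buffer as in the earlier examples, and 16 is an
 * arbitrary offset within the block):
 *
 *	struct RAR_buffer view;
 *
 *	view.info.handle = buf.info.handle + 16;
 *	if (rar_handle_to_bus(&view, 1) == 1) {
 *		... view.bus_address == buf.bus_address + 16 ...
 *		rar_release(&view, 1);
 *	}
 */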

static const struct file_operations memrar_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = memrar_ioctl,
        .mmap = memrar_mmap,
        .open = memrar_open,
        .release = memrar_release,
        .llseek = no_llseek,
};

static struct miscdevice memrar_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,    /* dynamic allocation */
        .name = "memrar",               /* /dev/memrar */
        .fops = &memrar_fops
};

static char const banner[] __initdata =
        KERN_INFO
        "Intel RAR Handler: " MEMRAR_VER " initialized.\n";

/**
 * memrar_registration_callback - RAR obtained
 * @rar: RAR number
 *
 * We have been granted ownership of the RAR. Add it to our memory
 * management tables.
 */

static int memrar_registration_callback(unsigned long rar)
{
        /*
         * We initialize the RAR parameters early on so that we can
         * discontinue memrar device initialization and registration
         * if suitably configured RARs are not available.
         */
        return memrar_init_rar_resources(rar, memrar_miscdev.name);
}

/**
 * memrar_init - initialise RAR support
 *
 * Initialise support for RAR handlers. This may get loaded before
 * the RAR support is activated, but the callbacks on the registration
 * will handle that situation for us anyway.
 */

static int __init memrar_init(void)
{
        int err;
        int i;

        printk(banner);

        /*
         * Some delayed initialization is performed in this driver.
         * Make sure resources that are used during driver clean-up
         * (e.g. during driver's release() function) are fully
         * initialized before first use. This is particularly
         * important for the case when the delayed initialization
         * isn't completed, leaving behind a partially initialized
         * driver.
         *
         * Such a scenario can occur when RAR is not available on the
         * platform, and the driver is release()d.
         */
        for (i = 0; i != ARRAY_SIZE(memrars); ++i) {
                struct memrar_rar_info * const rar = &memrars[i];
                mutex_init(&rar->lock);
                INIT_LIST_HEAD(&rar->buffers.list);
        }

        err = misc_register(&memrar_miscdev);
        if (err)
                return err;

        /* Now claim the two RARs we want. */
        err = register_rar(0, memrar_registration_callback, 0);
        if (err)
                goto fail;

        err = register_rar(1, memrar_registration_callback, 1);
        if (err == 0)
                return 0;

        /*
         * It is possible that RAR 0 registered and allocated resources
         * and then RAR 1 failed, so do a full resource free.
         */
        memrar_fini_rar_resources();
fail:
        misc_deregister(&memrar_miscdev);
        return err;
}

/**
 * memrar_exit - unregister and unload
 *
 * Unregister the device and then unload any mappings and release
 * the RAR resources.
 */

static void __exit memrar_exit(void)
{
        misc_deregister(&memrar_miscdev);
        memrar_fini_rar_resources();
}

module_init(memrar_init);
module_exit(memrar_exit);

MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEMRAR_VER);

/*
  Local Variables:
    c-file-style: "linux"
  End:
*/