/*
 *	memrar_handler 1.0:  An Intel restricted access region handler device
 *
 *	Copyright (C) 2010 Intel Corporation. All rights reserved.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of version 2 of the GNU General
 *	Public License as published by the Free Software Foundation.
 *
 *	This program is distributed in the hope that it will be
 *	useful, but WITHOUT ANY WARRANTY; without even the implied
 *	warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 *	PURPOSE.  See the GNU General Public License for more details.
 *	You should have received a copy of the GNU General Public
 *	License along with this program; if not, write to the Free
 *	Software Foundation, Inc., 59 Temple Place - Suite 330,
 *	Boston, MA  02111-1307, USA.
 *	The full GNU General Public License is included in this
 *	distribution in the file called COPYING.
 *
 * -------------------------------------------------------------------
 *
 *	Moorestown restricted access regions (RAR) provide isolated
 *	areas of main memory that are only accessible by authorized
 *	hardware.
 *
 *	The Intel Moorestown RAR handler module exposes a kernel space
 *	RAR memory management mechanism.  It is essentially a
 *	RAR-specific allocator.
 *
 *	Besides providing RAR buffer management, the RAR handler also
 *	behaves in many ways like an OS virtual memory manager.  For
 *	example, the RAR "handles" created by the RAR handler are
 *	analogous to user space virtual addresses.
 *
 *	RAR memory itself is never accessed directly by the RAR
 *	handler.
 */
40 #include <linux/miscdevice.h>
42 #include <linux/slab.h>
43 #include <linux/kref.h>
44 #include <linux/mutex.h>
45 #include <linux/kernel.h>
46 #include <linux/uaccess.h>
48 #include <linux/ioport.h>
50 #include <linux/rar_register.h>
53 #include "memrar_allocator.h"
/* Driver version reported via MODULE_VERSION and the init banner. */
#define MEMRAR_VER "1.0"

/*
 * Moorestown supports three restricted access regions.
 *
 * We only care about the first two, video and audio.  The third,
 * reserved for Chaabi and the P-unit, will be handled by their
 * respective drivers.
 */
#define MRST_NUM_RAR 2

/* ---------------- -------------------- ------------------- */
70 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
71 * @list: Linked list of memrar_buffer_info objects.
72 * @buffer: Core RAR buffer information.
73 * @refcount: Reference count.
74 * @owner: File handle corresponding to process that reserved the
75 * block of memory in RAR. This will be zero for buffers
76 * allocated by other drivers instead of by a user space
79 * This structure encapsulates a link list of RAR buffers, as well as
80 * other characteristics specific to a given list node, such as the
81 * reference count on the corresponding RAR buffer.
83 struct memrar_buffer_info
{
84 struct list_head list
;
85 struct RAR_buffer buffer
;
91 * struct memrar_rar_info - characteristics of a given RAR
92 * @base: Base bus address of the RAR.
93 * @length: Length of the RAR.
94 * @iobase: Virtual address of RAR mapped into kernel.
95 * @allocator: Allocator associated with the RAR. Note the allocator
96 * "capacity" may be smaller than the RAR length if the
97 * length is not a multiple of the configured allocator
99 * @buffers: Table that keeps track of all reserved RAR buffers.
100 * @lock: Lock used to synchronize access to RAR-specific data
103 * Each RAR has an associated memrar_rar_info structure that describes
104 * where in memory the RAR is located, how large it is, and a list of
105 * reserved RAR buffers inside that RAR. Each RAR also has a mutex
106 * associated with it to reduce lock contention when operations on
107 * multiple RARs are performed in parallel.
109 struct memrar_rar_info
{
111 unsigned long length
;
112 void __iomem
*iobase
;
113 struct memrar_allocator
*allocator
;
114 struct memrar_buffer_info buffers
;
116 int allocated
; /* True if we own this RAR */
120 * Array of RAR characteristics.
122 static struct memrar_rar_info memrars
[MRST_NUM_RAR
];
124 /* ---------------- -------------------- ------------------- */
126 /* Validate RAR type. */
127 static inline int memrar_is_valid_rar_type(u32 type
)
129 return type
== RAR_TYPE_VIDEO
|| type
== RAR_TYPE_AUDIO
;
132 /* Check if an address/handle falls with the given RAR memory range. */
133 static inline int memrar_handle_in_range(struct memrar_rar_info
*rar
,
136 unsigned long const iobase
= (unsigned long) (rar
->iobase
);
137 return (vaddr
>= iobase
&& vaddr
< iobase
+ rar
->length
);
140 /* Retrieve RAR information associated with the given handle. */
141 static struct memrar_rar_info
*memrar_get_rar_info(u32 vaddr
)
144 for (i
= 0; i
< MRST_NUM_RAR
; ++i
) {
145 struct memrar_rar_info
* const rar
= &memrars
[i
];
146 if (memrar_handle_in_range(rar
, vaddr
))
154 * memrar_get_bus address - handle to bus address
156 * Retrieve bus address from given handle.
158 * Returns address corresponding to given handle. Zero if handle is
161 static dma_addr_t
memrar_get_bus_address(
162 struct memrar_rar_info
*rar
,
165 unsigned long const iobase
= (unsigned long) (rar
->iobase
);
167 if (!memrar_handle_in_range(rar
, vaddr
))
171 * An assumption is made that the virtual address offset is
172 * the same as the bus address offset, at least based on the
173 * way this driver is implemented. For example, vaddr + 2 ==
176 * @todo Is that a valid assumption?
178 return rar
->base
+ (vaddr
- iobase
);
182 * memrar_get_physical_address - handle to physical address
184 * Retrieve physical address from given handle.
186 * Returns address corresponding to given handle. Zero if handle is
189 static dma_addr_t
memrar_get_physical_address(
190 struct memrar_rar_info
*rar
,
194 * @todo This assumes that the bus address and physical
195 * address are the same. That is true for Moorestown
196 * but not necessarily on other platforms. This
197 * deficiency should be addressed at some point.
199 return memrar_get_bus_address(rar
, vaddr
);
203 * memrar_release_block - release a block to the pool
204 * @kref: kref of block
206 * Core block release code. A node has hit zero references so can
207 * be released and the lists must be updated.
209 * Note: This code removes the node from a list. Make sure any list
210 * iteration is performed using list_for_each_safe().
212 static void memrar_release_block_i(struct kref
*ref
)
215 * Last reference is being released. Remove from the table,
216 * and reclaim resources.
219 struct memrar_buffer_info
* const node
=
220 container_of(ref
, struct memrar_buffer_info
, refcount
);
222 struct RAR_block_info
* const user_info
=
225 struct memrar_allocator
* const allocator
=
226 memrars
[user_info
->type
].allocator
;
228 list_del(&node
->list
);
230 memrar_allocator_free(allocator
, user_info
->handle
);
236 * memrar_init_rar_resources - configure a RAR
237 * @rarnum: rar that has been allocated
238 * @devname: name of our device
240 * Initialize RAR parameters, such as bus addresses, etc and make
241 * the resource accessible.
243 static int memrar_init_rar_resources(int rarnum
, char const *devname
)
245 /* ---- Sanity Checks ----
246 * 1. RAR bus addresses in both Lincroft and Langwell RAR
247 * registers should be the same.
248 * a. There's no way we can do this through IA.
250 * 2. Secure device ID in Langwell RAR registers should be set
251 * appropriately, e.g. only LPE DMA for the audio RAR, and
252 * security for the other Langwell based RAR registers.
253 * a. There's no way we can do this through IA.
255 * 3. Audio and video RAR registers and RAR access should be
256 * locked down. If not, enable RAR access control. Except
257 * for debugging purposes, there is no reason for them to
259 * a. We can only do this for the Lincroft (IA) side.
261 * @todo Should the RAR handler driver even be aware of audio
262 * and video RAR settings?
266 * RAR buffer block size.
268 * We choose it to be the size of a page to simplify the
269 * /dev/memrar mmap() implementation and usage. Otherwise
270 * paging is not involved once an RAR is locked down.
272 static size_t const RAR_BLOCK_SIZE
= PAGE_SIZE
;
274 dma_addr_t low
, high
;
275 struct memrar_rar_info
* const rar
= &memrars
[rarnum
];
277 BUG_ON(MRST_NUM_RAR
!= ARRAY_SIZE(memrars
));
278 BUG_ON(!memrar_is_valid_rar_type(rarnum
));
279 BUG_ON(rar
->allocated
);
281 if (rar_get_address(rarnum
, &low
, &high
) != 0)
282 /* No RAR is available. */
285 if (low
== 0 || high
== 0) {
289 rar
->allocator
= NULL
;
294 * @todo Verify that LNC and LNW RAR register contents
295 * addresses, security, etc are compatible and
299 rar
->length
= high
- low
+ 1;
301 /* Claim RAR memory as our own. */
302 if (request_mem_region(low
, rar
->length
, devname
) == NULL
) {
304 pr_err("%s: Unable to claim RAR[%d] memory.\n",
306 pr_err("%s: RAR[%d] disabled.\n", devname
, rarnum
);
313 * Now map it into the kernel address space.
315 * Note that the RAR memory may only be accessed by IA
316 * when debugging. Otherwise attempts to access the
317 * RAR memory when it is locked down will result in
318 * behavior similar to writing to /dev/null and
319 * reading from /dev/zero. This behavior is enforced
320 * by the hardware. Even if we don't access the
321 * memory, mapping it into the kernel provides us with
322 * a convenient RAR handle to bus address mapping.
324 rar
->iobase
= ioremap_nocache(rar
->base
, rar
->length
);
325 if (rar
->iobase
== NULL
) {
326 pr_err("%s: Unable to map RAR memory.\n", devname
);
327 release_mem_region(low
, rar
->length
);
331 /* Initialize corresponding memory allocator. */
332 rar
->allocator
= memrar_create_allocator((unsigned long) rar
->iobase
,
333 rar
->length
, RAR_BLOCK_SIZE
);
334 if (rar
->allocator
== NULL
) {
335 iounmap(rar
->iobase
);
336 release_mem_region(low
, rar
->length
);
340 pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
341 devname
, rarnum
, (unsigned long) low
, (unsigned long) high
);
343 pr_info("%s: BRAR[%d] size = %zu KiB\n",
344 devname
, rarnum
, rar
->allocator
->capacity
/ 1024);
351 * memrar_fini_rar_resources - free up RAR resources
353 * Finalize RAR resources. Free up the resource tables, hand the memory
354 * back to the kernel, unmap the device and release the address space.
356 static void memrar_fini_rar_resources(void)
359 struct memrar_buffer_info
*pos
;
360 struct memrar_buffer_info
*tmp
;
363 * @todo Do we need to hold a lock at this point in time?
364 * (module initialization failure or exit?)
367 for (z
= MRST_NUM_RAR
; z
-- != 0; ) {
368 struct memrar_rar_info
* const rar
= &memrars
[z
];
373 /* Clean up remaining resources. */
375 list_for_each_entry_safe(pos
,
379 kref_put(&pos
->refcount
, memrar_release_block_i
);
382 memrar_destroy_allocator(rar
->allocator
);
383 rar
->allocator
= NULL
;
385 iounmap(rar
->iobase
);
386 release_mem_region(rar
->base
, rar
->length
);
397 * memrar_reserve_block - handle an allocation request
398 * @request: block being requested
399 * @filp: owner it is tied to
401 * Allocate a block of the requested RAR. If successful return the
402 * request object filled in and zero, if not report an error code
405 static long memrar_reserve_block(struct RAR_buffer
*request
,
408 struct RAR_block_info
* const rinfo
= &request
->info
;
409 struct RAR_buffer
*buffer
;
410 struct memrar_buffer_info
*buffer_info
;
412 struct memrar_rar_info
*rar
= NULL
;
414 /* Prevent array overflow. */
415 if (!memrar_is_valid_rar_type(rinfo
->type
))
418 rar
= &memrars
[rinfo
->type
];
422 /* Reserve memory in RAR. */
423 handle
= memrar_allocator_alloc(rar
->allocator
, rinfo
->size
);
427 buffer_info
= kmalloc(sizeof(*buffer_info
), GFP_KERNEL
);
429 if (buffer_info
== NULL
) {
430 memrar_allocator_free(rar
->allocator
, handle
);
434 buffer
= &buffer_info
->buffer
;
435 buffer
->info
.type
= rinfo
->type
;
436 buffer
->info
.size
= rinfo
->size
;
438 /* Memory handle corresponding to the bus address. */
439 buffer
->info
.handle
= handle
;
440 buffer
->bus_address
= memrar_get_bus_address(rar
, handle
);
443 * Keep track of owner so that we can later cleanup if
446 buffer_info
->owner
= filp
;
448 kref_init(&buffer_info
->refcount
);
450 mutex_lock(&rar
->lock
);
451 list_add(&buffer_info
->list
, &rar
->buffers
.list
);
452 mutex_unlock(&rar
->lock
);
454 rinfo
->handle
= buffer
->info
.handle
;
455 request
->bus_address
= buffer
->bus_address
;
461 * memrar_release_block - release a RAR block
462 * @addr: address in RAR space
464 * Release a previously allocated block. Releases act on complete
465 * blocks, partially freeing a block is not supported
468 static long memrar_release_block(u32 addr
)
470 struct memrar_buffer_info
*pos
;
471 struct memrar_buffer_info
*tmp
;
472 struct memrar_rar_info
* const rar
= memrar_get_rar_info(addr
);
473 long result
= -EINVAL
;
478 mutex_lock(&rar
->lock
);
481 * Iterate through the buffer list to find the corresponding
482 * buffer to be released.
484 list_for_each_entry_safe(pos
,
488 struct RAR_block_info
* const info
=
492 * Take into account handle offsets that may have been
493 * added to the base handle, such as in the following
496 * u32 handle = base + offset;
497 * rar_handle_to_bus(handle);
498 * rar_release(handle);
500 if (addr
>= info
->handle
501 && addr
< (info
->handle
+ info
->size
)
502 && memrar_is_valid_rar_type(info
->type
)) {
503 kref_put(&pos
->refcount
, memrar_release_block_i
);
509 mutex_unlock(&rar
->lock
);
515 * memrar_get_stats - read statistics for a RAR
516 * @r: statistics to be filled in
518 * Returns the statistics data for the RAR, or an error code if
519 * the request cannot be completed
521 static long memrar_get_stat(struct RAR_stat
*r
)
523 struct memrar_allocator
*allocator
;
525 if (!memrar_is_valid_rar_type(r
->type
))
528 if (!memrars
[r
->type
].allocated
)
531 allocator
= memrars
[r
->type
].allocator
;
533 BUG_ON(allocator
== NULL
);
536 * Allocator capacity doesn't change over time. No
537 * need to synchronize.
539 r
->capacity
= allocator
->capacity
;
541 mutex_lock(&allocator
->lock
);
542 r
->largest_block_size
= allocator
->largest_free_area
;
543 mutex_unlock(&allocator
->lock
);
548 * memrar_ioctl - ioctl callback
549 * @filp: file issuing the request
551 * @arg: pointer to control information
553 * Perform one of the ioctls supported by the memrar device
556 static long memrar_ioctl(struct file
*filp
,
560 void __user
*argp
= (void __user
*)arg
;
563 struct RAR_buffer buffer
;
564 struct RAR_block_info
* const request
= &buffer
.info
;
565 struct RAR_stat rar_info
;
569 case RAR_HANDLER_RESERVE
:
570 if (copy_from_user(request
,
575 result
= memrar_reserve_block(&buffer
, filp
);
579 return copy_to_user(argp
, request
, sizeof(*request
));
581 case RAR_HANDLER_RELEASE
:
582 if (copy_from_user(&rar_handle
,
587 return memrar_release_block(rar_handle
);
589 case RAR_HANDLER_STAT
:
590 if (copy_from_user(&rar_info
,
596 * Populate the RAR_stat structure based on the RAR
597 * type given by the user
599 if (memrar_get_stat(&rar_info
) != 0)
603 * @todo Do we need to verify destination pointer
604 * "argp" is non-zero? Is that already done by
607 return copy_to_user(argp
,
609 sizeof(rar_info
)) ? -EFAULT
: 0;
619 * memrar_mmap - mmap helper for deubgging
620 * @filp: handle doing the mapping
623 * Support the mmap operation on the RAR space for debugging systems
624 * when the memory is not locked down.
627 static int memrar_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
630 * This mmap() implementation is predominantly useful for
631 * debugging since the CPU will be prevented from accessing
632 * RAR memory by the hardware when RAR is properly locked
635 * In order for this implementation to be useful RAR memory
636 * must be not be locked down. However, we only want to do
637 * that when debugging. DO NOT leave RAR memory unlocked in a
638 * deployed device that utilizes RAR.
641 size_t const size
= vma
->vm_end
- vma
->vm_start
;
643 /* Users pass the RAR handle as the mmap() offset parameter. */
644 unsigned long const handle
= vma
->vm_pgoff
<< PAGE_SHIFT
;
646 struct memrar_rar_info
* const rar
= memrar_get_rar_info(handle
);
649 /* Only allow priviledged apps to go poking around this way */
650 if (!capable(CAP_SYS_RAWIO
))
653 /* Invalid RAR handle or size passed to mmap(). */
656 || size
> (handle
- (unsigned long) rar
->iobase
))
660 * Retrieve physical address corresponding to the RAR handle,
661 * and convert it to a page frame.
663 pfn
= memrar_get_physical_address(rar
, handle
) >> PAGE_SHIFT
;
666 pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
671 * Map RAR memory into user space. This is really only useful
672 * for debugging purposes since the memory won't be
673 * accessible, i.e. reads return zero and writes are ignored,
674 * when RAR access control is enabled.
676 if (remap_pfn_range(vma
,
683 /* vma->vm_ops = &memrar_mem_ops; */
/**
 * memrar_open - device open method
 * @inode:	inode to open
 * @filp:	file handle
 *
 * As we support multiple arbitrary opens there is no work to be done
 * on open.
 */
static int memrar_open(struct inode *inode, struct file *filp)
{
	/*
	 * Propagate nonseekable_open()'s result instead of discarding
	 * it, so any future change to the helper is not silently lost.
	 */
	return nonseekable_open(inode, filp);
}
704 * memrar_release - close method for miscev
705 * @inode: inode of device
706 * @filp: handle that is going away
708 * Free up all the regions that belong to this file handle. We use
709 * the handle as a natural Linux style 'lifetime' indicator and to
710 * ensure resources are not leaked when their owner explodes in an
714 static int memrar_release(struct inode
*inode
, struct file
*filp
)
716 /* Free all regions associated with the given file handle. */
718 struct memrar_buffer_info
*pos
;
719 struct memrar_buffer_info
*tmp
;
722 for (z
= 0; z
!= MRST_NUM_RAR
; ++z
) {
723 struct memrar_rar_info
* const rar
= &memrars
[z
];
725 mutex_lock(&rar
->lock
);
727 list_for_each_entry_safe(pos
,
731 if (filp
== pos
->owner
)
732 kref_put(&pos
->refcount
,
733 memrar_release_block_i
);
736 mutex_unlock(&rar
->lock
);
743 * rar_reserve - reserve RAR memory
744 * @buffers: buffers to reserve
745 * @count: number wanted
747 * Reserve a series of buffers in the RAR space. Returns the number of
748 * buffers successfully allocated
751 size_t rar_reserve(struct RAR_buffer
*buffers
, size_t count
)
753 struct RAR_buffer
* const end
=
754 (buffers
== NULL
? buffers
: buffers
+ count
);
755 struct RAR_buffer
*i
;
757 size_t reserve_count
= 0;
759 for (i
= buffers
; i
!= end
; ++i
) {
760 if (memrar_reserve_block(i
, NULL
) == 0)
766 return reserve_count
;
768 EXPORT_SYMBOL(rar_reserve
);
771 * rar_release - return RAR buffers
772 * @buffers: buffers to release
773 * @size: size of released block
775 * Return a set of buffers to the RAR pool
778 size_t rar_release(struct RAR_buffer
*buffers
, size_t count
)
780 struct RAR_buffer
* const end
=
781 (buffers
== NULL
? buffers
: buffers
+ count
);
782 struct RAR_buffer
*i
;
784 size_t release_count
= 0;
786 for (i
= buffers
; i
!= end
; ++i
) {
787 u32
* const handle
= &i
->info
.handle
;
788 if (memrar_release_block(*handle
) == 0) {
790 * @todo We assume we should do this each time
791 * the ref count is decremented. Should
792 * we instead only do this when the ref
793 * count has dropped to zero, and the
794 * buffer has been completely
802 return release_count
;
804 EXPORT_SYMBOL(rar_release
);
807 * rar_handle_to_bus - RAR to bus address
808 * @buffers: RAR buffer structure
809 * @count: number of buffers to convert
811 * Turn a list of RAR handle mappings into actual bus addresses. Note
812 * that when the device is locked down the bus addresses in question
813 * are not CPU accessible.
816 size_t rar_handle_to_bus(struct RAR_buffer
*buffers
, size_t count
)
818 struct RAR_buffer
* const end
=
819 (buffers
== NULL
? buffers
: buffers
+ count
);
820 struct RAR_buffer
*i
;
821 struct memrar_buffer_info
*pos
;
823 size_t conversion_count
= 0;
826 * Find all bus addresses corresponding to the given handles.
828 * @todo Not liking this nested loop. Optimize.
830 for (i
= buffers
; i
!= end
; ++i
) {
831 struct memrar_rar_info
* const rar
=
832 memrar_get_rar_info(i
->info
.handle
);
835 * Check if we have a bogus handle, and then continue
836 * with remaining buffers.
843 mutex_lock(&rar
->lock
);
845 list_for_each_entry(pos
, &rar
->buffers
.list
, list
) {
846 struct RAR_block_info
* const user_info
=
850 * Take into account handle offsets that may
851 * have been added to the base handle, such as
852 * in the following scenario:
854 * u32 handle = base + offset;
855 * rar_handle_to_bus(handle);
858 if (i
->info
.handle
>= user_info
->handle
859 && i
->info
.handle
< (user_info
->handle
860 + user_info
->size
)) {
862 i
->info
.handle
- user_info
->handle
;
864 i
->info
.type
= user_info
->type
;
865 i
->info
.size
= user_info
->size
- offset
;
867 pos
->buffer
.bus_address
870 /* Increment the reference count. */
871 kref_get(&pos
->refcount
);
880 mutex_unlock(&rar
->lock
);
883 return conversion_count
;
885 EXPORT_SYMBOL(rar_handle_to_bus
);
887 static const struct file_operations memrar_fops
= {
888 .owner
= THIS_MODULE
,
889 .unlocked_ioctl
= memrar_ioctl
,
892 .release
= memrar_release
,
896 static struct miscdevice memrar_miscdev
= {
897 .minor
= MISC_DYNAMIC_MINOR
, /* dynamic allocation */
898 .name
= "memrar", /* /dev/memrar */
902 static char const banner
[] __initdata
=
904 "Intel RAR Handler: " MEMRAR_VER
" initialized.\n";
907 * memrar_registration_callback - RAR obtained
910 * We have been granted ownership of the RAR. Add it to our memory
914 static int memrar_registration_callback(unsigned long rar
)
917 * We initialize the RAR parameters early on so that we can
918 * discontinue memrar device initialization and registration
919 * if suitably configured RARs are not available.
921 return memrar_init_rar_resources(rar
, memrar_miscdev
.name
);
925 * memrar_init - initialise RAR support
927 * Initialise support for RAR handlers. This may get loaded before
928 * the RAR support is activated, but the callbacks on the registration
929 * will handle that situation for us anyway.
932 static int __init
memrar_init(void)
940 * Some delayed initialization is performed in this driver.
941 * Make sure resources that are used during driver clean-up
942 * (e.g. during driver's release() function) are fully
943 * initialized before first use. This is particularly
944 * important for the case when the delayed initialization
945 * isn't completed, leaving behind a partially initialized
948 * Such a scenario can occur when RAR is not available on the
949 * platform, and the driver is release()d.
951 for (i
= 0; i
!= ARRAY_SIZE(memrars
); ++i
) {
952 struct memrar_rar_info
* const rar
= &memrars
[i
];
953 mutex_init(&rar
->lock
);
954 INIT_LIST_HEAD(&rar
->buffers
.list
);
957 err
= misc_register(&memrar_miscdev
);
961 /* Now claim the two RARs we want */
962 err
= register_rar(0, memrar_registration_callback
, 0);
966 err
= register_rar(1, memrar_registration_callback
, 1);
970 /* It is possible rar 0 registered and allocated resources then rar 1
971 failed so do a full resource free */
972 memrar_fini_rar_resources();
974 misc_deregister(&memrar_miscdev
);
979 * memrar_exit - unregister and unload
981 * Unregister the device and then unload any mappings and release
985 static void __exit
memrar_exit(void)
987 misc_deregister(&memrar_miscdev
);
988 memrar_fini_rar_resources();
992 module_init(memrar_init
);
993 module_exit(memrar_exit
);
996 MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
997 MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
998 MODULE_LICENSE("GPL");
999 MODULE_VERSION(MEMRAR_VER
);
1005 c-file-style: "linux"