/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/resource_ext.h>
#include <asm/io.h>

struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

/*
 * For memory hotplug, there is no way to free resource entries allocated
 * by boot mem after the system is up. So for reusing the resource entry
 * we need to remember the resource.
 */
static struct resource *bootmem_resource_free;
static DEFINE_SPINLOCK(bootmem_resource_lock);

static struct resource *next_resource(struct resource *p, bool sibling_only)
{
	/* Caller wants to traverse through siblings only */
	if (sibling_only)
		return p->sibling;

	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	return (void *)next_resource(p, false);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	if (!res)
		return;

	if (!PageSlab(virt_to_head_page(res))) {
		spin_lock(&bootmem_resource_lock);
		res->sibling = bootmem_resource_free;
		bootmem_resource_free = res;
		spin_unlock(&bootmem_resource_lock);
	} else {
		kfree(res);
	}
}

static struct resource *alloc_resource(gfp_t flags)
{
	struct resource *res = NULL;

	spin_lock(&bootmem_resource_lock);
	if (bootmem_resource_free) {
		res = bootmem_resource_free;
		bootmem_resource_free = res->sibling;
	}
	spin_unlock(&bootmem_resource_lock);

	if (res)
		memset(res, 0, sizeof(struct resource));
	else
		res = kzalloc(sizeof(struct resource), flags);

	return res;
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

/*
 * Finds the lowest iomem resource existing within [res->start..res->end).
 * The caller must specify res->start, res->end, res->flags, and optionally
 * desc.  If found, returns 0, res is overwritten, if not found, returns -1.
 * This function walks the whole tree and not just first level children until
 * and unless first_level_children_only is true.
 */
static int find_next_iomem_res(struct resource *res, unsigned long desc,
			       bool first_level_children_only)
{
	resource_size_t start, end;
	struct resource *p;
	bool sibling_only = false;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	if (first_level_children_only)
		sibling_only = true;

	read_lock(&resource_lock);

	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
		if ((p->flags & res->flags) != res->flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}

	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * Walks through iomem resources and calls func() with matching resource
 * ranges. This walks through whole tree and not just first level children.
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(u64, u64, void *))
{
	struct resource res;
	u64 orig_end;
	int ret = -1;

	res.start = start;
	res.end = end;
	res.flags = flags;
	orig_end = res.end;

	while ((res.start < res.end) &&
		(!find_next_iomem_res(&res, desc, false))) {

		ret = (*func)(res.start, res.end, arg);
		if (ret)
			break;

		res.start = res.end + 1;
		res.end = orig_end;
	}

	return ret;
}

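/*
 * Example (illustrative sketch, not part of the original file): walking all
 * busy System RAM ranges and summing their sizes via a callback.  The
 * callback name is hypothetical.
 *
 *	static int foo_count_bytes(u64 start, u64 end, void *arg)
 *	{
 *		u64 *total = arg;
 *
 *		*total += end - start + 1;
 *		return 0;
 *	}
 *	...
 *	u64 total = 0;
 *	walk_iomem_res_desc(IORES_DESC_NONE,
 *			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
 *			    0, -1, &total, foo_count_bytes);
 */
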
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
				int (*func)(u64, u64, void *))
{
	struct resource res;
	u64 orig_end;
	int ret = -1;

	res.start = start;
	res.end = end;
	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(!find_next_iomem_res(&res, IORES_DESC_NONE, true))) {
		ret = (*func)(res.start, res.end, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	resource_size_t end = start + size - 1;
	int type = 0; int other = 0;
	struct resource *p;

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (start >= p->start && start <= p->end)
			is_type ? type++ : other++;
		if (end >= p->start && end <= p->end)
			is_type ? type++ : other++;
		if (p->start >= start && p->end <= end)
			is_type ? type++ : other++;
	}
	read_unlock(&resource_lock);

	if (other == 0)
		return type ? REGION_INTERSECTS : REGION_DISJOINT;

	if (type)
		return REGION_MIXED;

	return REGION_DISJOINT;
}
EXPORT_SYMBOL_GPL(region_intersects);

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			 struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			resource_size_t newsize,
			struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, conflict resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

static int __adjust_resource(struct resource *res, resource_size_t start,
				resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

 out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = alloc_resource(GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = resource_type(parent) | resource_ext_type(parent);
	res->flags |= IORESOURCE_BUSY | flags;
	res->desc = IORES_DESC_NONE;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		free_resource(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete.  The requested region
 * is released from a currently busy memory resource.  The requested region
 * must either match exactly or fit into a single busy resource entry.  In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * release process.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity.  Enhance this logic when necessary.
 */
int release_mem_region_adjustable(struct resource *parent,
			resource_size_t start, resource_size_t size)
{
	struct resource **p;
	struct resource *res;
	struct resource *new_res;
	resource_size_t end;
	int ret = -EINVAL;

	end = start + size - 1;
	if ((start < parent->start) || (end > parent->end))
		return ret;

	/* The alloc_resource() result gets checked later */
	new_res = alloc_resource(GFP_KERNEL);

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
			ret = 0;
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			ret = __adjust_resource(res, end + 1,
						res->end - end);
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			ret = __adjust_resource(res, res->start,
						start - res->start);
		} else {
			/* split into two entries */
			if (!new_res) {
				ret = -ENOMEM;
				break;
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			ret = __adjust_resource(res, res->start,
						start - res->start);
			if (ret)
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
	return ret;
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size spans more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if ((p->flags & IORESOURCE_BUSY) == 0)
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
				|| p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);