1 #include <linux/bootmem.h>
3 #include <linux/export.h>
4 #include <linux/rwlock.h>
5 #include <linux/slab.h>
6 #include <linux/types.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/vmalloc.h>
9 #include <linux/swiotlb.h>
12 #include <xen/interface/memory.h>
13 #include <xen/swiotlb-xen.h>
15 #include <asm/cacheflush.h>
16 #include <asm/xen/page.h>
17 #include <asm/xen/hypercall.h>
18 #include <asm/xen/interface.h>
20 struct xen_p2m_entry
{
23 unsigned long nr_pages
;
24 struct rb_node rbnode_mach
;
25 struct rb_node rbnode_phys
;
/* pfn -> mfn lookup tree, ordered by pfn; non-static so other Xen code
 * (e.g. swiotlb-xen) can reach it. */
struct rb_root phys_to_mach = RB_ROOT;
/* mfn -> pfn reverse lookup tree, ordered by mfn; private to this file. */
static struct rb_root mach_to_phys = RB_ROOT;
32 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry
*new)
34 struct rb_node
**link
= &phys_to_mach
.rb_node
;
35 struct rb_node
*parent
= NULL
;
36 struct xen_p2m_entry
*entry
;
41 entry
= rb_entry(parent
, struct xen_p2m_entry
, rbnode_phys
);
43 if (new->mfn
== entry
->mfn
)
45 if (new->pfn
== entry
->pfn
)
48 if (new->pfn
< entry
->pfn
)
49 link
= &(*link
)->rb_left
;
51 link
= &(*link
)->rb_right
;
53 rb_link_node(&new->rbnode_phys
, parent
, link
);
54 rb_insert_color(&new->rbnode_phys
, &phys_to_mach
);
59 pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
60 __func__
, &new->pfn
, &new->mfn
, &entry
->pfn
, &entry
->mfn
);
65 unsigned long __pfn_to_mfn(unsigned long pfn
)
67 struct rb_node
*n
= phys_to_mach
.rb_node
;
68 struct xen_p2m_entry
*entry
;
69 unsigned long irqflags
;
71 read_lock_irqsave(&p2m_lock
, irqflags
);
73 entry
= rb_entry(n
, struct xen_p2m_entry
, rbnode_phys
);
74 if (entry
->pfn
<= pfn
&&
75 entry
->pfn
+ entry
->nr_pages
> pfn
) {
76 read_unlock_irqrestore(&p2m_lock
, irqflags
);
77 return entry
->mfn
+ (pfn
- entry
->pfn
);
84 read_unlock_irqrestore(&p2m_lock
, irqflags
);
86 return INVALID_P2M_ENTRY
;
88 EXPORT_SYMBOL_GPL(__pfn_to_mfn
);
90 static int xen_add_mach_to_phys_entry(struct xen_p2m_entry
*new)
92 struct rb_node
**link
= &mach_to_phys
.rb_node
;
93 struct rb_node
*parent
= NULL
;
94 struct xen_p2m_entry
*entry
;
99 entry
= rb_entry(parent
, struct xen_p2m_entry
, rbnode_mach
);
101 if (new->mfn
== entry
->mfn
)
103 if (new->pfn
== entry
->pfn
)
106 if (new->mfn
< entry
->mfn
)
107 link
= &(*link
)->rb_left
;
109 link
= &(*link
)->rb_right
;
111 rb_link_node(&new->rbnode_mach
, parent
, link
);
112 rb_insert_color(&new->rbnode_mach
, &mach_to_phys
);
117 pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
118 __func__
, &new->pfn
, &new->mfn
, &entry
->pfn
, &entry
->mfn
);
123 unsigned long __mfn_to_pfn(unsigned long mfn
)
125 struct rb_node
*n
= mach_to_phys
.rb_node
;
126 struct xen_p2m_entry
*entry
;
127 unsigned long irqflags
;
129 read_lock_irqsave(&p2m_lock
, irqflags
);
131 entry
= rb_entry(n
, struct xen_p2m_entry
, rbnode_mach
);
132 if (entry
->mfn
<= mfn
&&
133 entry
->mfn
+ entry
->nr_pages
> mfn
) {
134 read_unlock_irqrestore(&p2m_lock
, irqflags
);
135 return entry
->pfn
+ (mfn
- entry
->mfn
);
137 if (mfn
< entry
->mfn
)
142 read_unlock_irqrestore(&p2m_lock
, irqflags
);
144 return INVALID_P2M_ENTRY
;
146 EXPORT_SYMBOL_GPL(__mfn_to_pfn
);
148 bool __set_phys_to_machine_multi(unsigned long pfn
,
149 unsigned long mfn
, unsigned long nr_pages
)
152 unsigned long irqflags
;
153 struct xen_p2m_entry
*p2m_entry
;
154 struct rb_node
*n
= phys_to_mach
.rb_node
;
156 if (mfn
== INVALID_P2M_ENTRY
) {
157 write_lock_irqsave(&p2m_lock
, irqflags
);
159 p2m_entry
= rb_entry(n
, struct xen_p2m_entry
, rbnode_phys
);
160 if (p2m_entry
->pfn
<= pfn
&&
161 p2m_entry
->pfn
+ p2m_entry
->nr_pages
> pfn
) {
162 rb_erase(&p2m_entry
->rbnode_mach
, &mach_to_phys
);
163 rb_erase(&p2m_entry
->rbnode_phys
, &phys_to_mach
);
164 write_unlock_irqrestore(&p2m_lock
, irqflags
);
168 if (pfn
< p2m_entry
->pfn
)
173 write_unlock_irqrestore(&p2m_lock
, irqflags
);
177 p2m_entry
= kzalloc(sizeof(struct xen_p2m_entry
), GFP_NOWAIT
);
179 pr_warn("cannot allocate xen_p2m_entry\n");
182 p2m_entry
->pfn
= pfn
;
183 p2m_entry
->nr_pages
= nr_pages
;
184 p2m_entry
->mfn
= mfn
;
186 write_lock_irqsave(&p2m_lock
, irqflags
);
187 if ((rc
= xen_add_phys_to_mach_entry(p2m_entry
) < 0) ||
188 (rc
= xen_add_mach_to_phys_entry(p2m_entry
) < 0)) {
189 write_unlock_irqrestore(&p2m_lock
, irqflags
);
192 write_unlock_irqrestore(&p2m_lock
, irqflags
);
195 EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi
);
197 bool __set_phys_to_machine(unsigned long pfn
, unsigned long mfn
)
199 return __set_phys_to_machine_multi(pfn
, mfn
, 1);
201 EXPORT_SYMBOL_GPL(__set_phys_to_machine
);
205 rwlock_init(&p2m_lock
);
208 arch_initcall(p2m_init
);