#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>

struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};

extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

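/*
 * Illustrative sketch (not part of the kernel API): a write_msi_msg
 * callback as a platform-MSI client driver might pass it to
 * platform_msi_domain_alloc_irqs() declared further down. The my_dev
 * structure and the MY_DOORBELL_* register offsets are hypothetical; a
 * real driver writes the composed address/data pair to whatever doorbell
 * registers its hardware defines.
 *
 *	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct my_dev *mydev = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel(msg->address_lo, mydev->base + MY_DOORBELL_ADDR_LO);
 *		writel(msg->address_hi, mydev->base + MY_DOORBELL_ADDR_HI);
 *		writel(msg->data, mydev->base + MY_DOORBELL_DATA);
 *	}
 */
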
/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI] Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform] Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc] FSL-MC device specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				__u8	is_msix		: 1;
				__u8	multiple	: 3;
				__u8	multi_cap	: 3;
				__u8	maskbit		: 1;
				__u8	is_64		: 1;
				__u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
	};
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)

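/*
 * Illustrative sketch: walking a device's MSI descriptors with the helpers
 * above, e.g. to look up the Linux irq number belonging to a given platform
 * MSI index. The "index" variable is hypothetical caller state.
 *
 *	struct msi_desc *desc;
 *
 *	for_each_msi_entry(desc, dev) {
 *		if (desc->platform.msi_index == index)
 *			return desc->irq;
 *	}
 *	return -ENOENT;
 */
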
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI */

struct msi_desc *alloc_msi_entry(struct device *dev);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

/* Conversion helpers. Should be removed after merging */
static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	__pci_write_msi_msg(entry, msg);
}
static inline void write_msi_msg(int irq, struct msi_msg *msg)
{
	pci_write_msi_msg(irq, msg);
}
static inline void mask_msi_irq(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}
static inline void unmask_msi_irq(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
}

/*
 * The arch hooks to set up msi irqs. Those functions are
 * implemented as weak symbols so that they /can/ be overridden by
 * architecture specific code if needed.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};

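/*
 * Illustrative sketch: skeleton of a setup_irq callback as a legacy PCI
 * host-bridge driver might wire it into struct msi_controller above.
 * my_alloc_hw_vector() is hypothetical; it stands in for whatever
 * controller-specific code picks a free hardware vector, composes the
 * matching struct msi_msg and returns a mapped Linux irq number.
 *
 *	static int my_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
 *				struct msi_desc *desc)
 *	{
 *		struct msi_msg msg;
 *		int irq = my_alloc_hw_vector(chip, &msg);
 *
 *		if (irq < 0)
 *			return irq;
 *		irq_set_msi_desc(irq, desc);
 *		pci_write_msi_msg(irq, &msg);
 *		return 0;
 *	}
 */
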
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non-implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non-implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Build identity map between hwirq and irq */
	MSI_FLAG_IDENTITY_MAP		= (1 << 2),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 3),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 4),
};

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

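/*
 * Illustrative sketch: how an irqchip driver might stack an MSI domain on
 * top of its parent domain, letting the core fill in the default domain and
 * chip callbacks. my_msi_irq_chip, fwnode and parent are assumed to exist in
 * the driver; only msi_create_irq_domain() and the MSI_FLAG_* values come
 * from this header.
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_irq_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &my_msi_domain_info, parent);
 *	if (!msi_domain)
 *		return -ENOMEM;
 */
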
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);

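/*
 * Illustrative sketch: a platform device driver requesting a block of
 * platform MSIs and wiring up the resulting Linux irq numbers. MY_NR_MSIS
 * and my_request_irq() are hypothetical driver code; only the
 * platform_msi_* calls, for_each_msi_entry() and the my_write_msi_msg()
 * callback sketched near the top of this file come from, or build on, this
 * header.
 *
 *	struct msi_desc *desc;
 *	int err;
 *
 *	err = platform_msi_domain_alloc_irqs(dev, MY_NR_MSIS, my_write_msi_msg);
 *	if (err)
 *		return err;
 *
 *	for_each_msi_entry(desc, dev)
 *		my_request_irq(dev, desc->irq);
 *
 * On teardown, platform_msi_domain_free_irqs(dev) releases the vectors.
 */
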
/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
platform_msi_create_device_domain(struct device *dev,
				  unsigned int nvec,
				  irq_write_msi_msg_t write_msi_msg,
				  const struct irq_domain_ops *ops,
				  void *host_data);
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
			      int nvec, int type);
void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
		 struct msi_domain_info *info, struct irq_domain *parent);

irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

#endif /* LINUX_MSI_H */