powerpc/powernv/ioda1: Improve DMA32 segment track
[deliverable/linux.git] arch/powerpc/platforms/powernv/pci.h
#ifndef __POWERNV_PCI_H
#define __POWERNV_PCI_H

struct pci_dn;

enum pnv_phb_type {
	PNV_PHB_IODA1	= 0,
	PNV_PHB_IODA2	= 1,
	PNV_PHB_NPU	= 2,
};

/* Precise PHB model for error management */
enum pnv_phb_model {
	PNV_PHB_MODEL_UNKNOWN,
	PNV_PHB_MODEL_P7IOC,
	PNV_PHB_MODEL_PHB3,
	PNV_PHB_MODEL_NPU,
};

#define PNV_PCI_DIAG_BUF_SIZE	8192
#define PNV_IODA_PE_DEV		(1 << 0)	/* PE has single PCI device	*/
#define PNV_IODA_PE_BUS		(1 << 1)	/* PE has primary PCI bus	*/
#define PNV_IODA_PE_BUS_ALL	(1 << 2)	/* PE has subordinate buses	*/
#define PNV_IODA_PE_MASTER	(1 << 3)	/* Master PE in compound case	*/
#define PNV_IODA_PE_SLAVE	(1 << 4)	/* Slave PE in compound case	*/
#define PNV_IODA_PE_VF		(1 << 5)	/* PE for one VF		*/
#define PNV_IODA_PE_PEER	(1 << 6)	/* PE has peers			*/

/* Data associated with a PE, including IOMMU tracking etc. */
struct pnv_phb;
struct pnv_ioda_pe {
	unsigned long		flags;
	struct pnv_phb		*phb;

#define PNV_IODA_MAX_PEER_PES	8
	struct pnv_ioda_pe	*peers[PNV_IODA_MAX_PEER_PES];

	/* A PE can be associated with a single device or an
	 * entire bus (& children). In the former case, pdev
	 * is populated, in the latter case, pbus is.
	 */
#ifdef CONFIG_PCI_IOV
	struct pci_dev		*parent_dev;
#endif
	struct pci_dev		*pdev;
	struct pci_bus		*pbus;

	/* Effective RID (device RID for a device PE and base bus
	 * RID with devfn 0 for a bus PE). See the illustrative
	 * sketch following this structure.
	 */
	unsigned int		rid;

	/* PE number */
	unsigned int		pe_number;

	/* "Base" iommu table, i.e. 4K TCEs, 32-bit DMA */
	struct iommu_table_group table_group;

	/* 64-bit TCE bypass region */
	bool			tce_bypass_enabled;
	uint64_t		tce_bypass_base;

	/* MSIs. MVE index is identical for 32 and 64 bit MSI
	 * and -1 if not supported. (It's actually identical to the
	 * PE number)
	 */
	int			mve_number;

	/* PEs in compound case */
	struct pnv_ioda_pe	*master;
	struct list_head	slaves;

	/* Link in list of PE#s */
	struct list_head	list;
};
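
/*
 * Illustrative sketch only (not part of the original header): how the
 * effective RID and the PE flags described above relate to each other.
 * The helper name and the exact field accesses are assumptions made for
 * this example; the authoritative assignments live in the IODA PE setup
 * code, not here.
 */
static inline void pnv_ioda_pe_example_rid(struct pnv_ioda_pe *pe,
					   struct pci_dev *pdev,
					   struct pci_bus *pbus)
{
	if (pdev) {
		/* Device PE: RID is the device's own { bus, devfn } pair. */
		pe->flags = PNV_IODA_PE_DEV;
		pe->pdev  = pdev;
		pe->rid   = (pdev->bus->number << 8) | pdev->devfn;
	} else {
		/* Bus PE: base RID of the bus, with devfn 0. */
		pe->flags = PNV_IODA_PE_BUS;
		pe->pbus  = pbus;
		pe->rid   = pbus->number << 8;
	}
}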

#define PNV_PHB_FLAG_EEH	(1 << 0)

struct pnv_phb {
	struct pci_controller	*hose;
	enum pnv_phb_type	type;
	enum pnv_phb_model	model;
	u64			hub_id;
	u64			opal_id;
	int			flags;
	void __iomem		*regs;
	int			initialized;
	spinlock_t		lock;

#ifdef CONFIG_DEBUG_FS
	int			has_dbgfs;
	struct dentry		*dbgfs;
#endif

#ifdef CONFIG_PCI_MSI
	unsigned int		msi_base;
	unsigned int		msi32_support;
	struct msi_bitmap	msi_bmp;
#endif
	int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
			 unsigned int hwirq, unsigned int virq,
			 unsigned int is_64, struct msi_msg *msg);
	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
	void (*fixup_phb)(struct pci_controller *hose);
	int (*init_m64)(struct pnv_phb *phb);
	void (*reserve_m64_pe)(struct pci_bus *bus,
			       unsigned long *pe_bitmap, bool all);
	unsigned int (*pick_m64_pe)(struct pci_bus *bus, bool all);
	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
	int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);

	struct {
		/* Global bridge info */
		unsigned int		total_pe_num;
		unsigned int		reserved_pe_idx;

		/* 32-bit MMIO window */
		unsigned int		m32_size;
		unsigned int		m32_segsize;
		unsigned int		m32_pci_base;

		/* 64-bit MMIO window */
		unsigned int		m64_bar_idx;
		unsigned long		m64_size;
		unsigned long		m64_segsize;
		unsigned long		m64_base;
		unsigned long		m64_bar_alloc;

		/* IO ports */
		unsigned int		io_size;
		unsigned int		io_segsize;
		unsigned int		io_pci_base;

		/* PE allocation */
		struct mutex		pe_alloc_mutex;
		unsigned long		*pe_alloc;
		struct pnv_ioda_pe	*pe_array;

		/* M64, M32 & IO segment maps */
		unsigned int		*m64_segmap;
		unsigned int		*m32_segmap;
		unsigned int		*io_segmap;

		/* DMA32 segment maps - IODA1 only */
		unsigned int		dma32_count;
		unsigned int		*dma32_segmap;

		/* IRQ chip */
		int			irq_chip_init;
		struct irq_chip		irq_chip;

		/* Sorted list of used PEs, based on the
		 * sequence of creation
		 */
		struct list_head	pe_list;
		struct mutex		pe_list_mutex;

		/* Reverse map of PEs, indexed by { bus, devfn }.
		 * Will have to be extended if we are to support
		 * more than 256 PEs. See the lookup sketch after
		 * this structure.
		 */
		unsigned char		pe_rmap[0x10000];

		/* TCE cache invalidate registers (physical and
		 * remapped)
		 */
		phys_addr_t		tce_inval_reg_phys;
		__be64 __iomem		*tce_inval_reg;
	} ioda;

	/* PHB and hub status structure */
	union {
		unsigned char			blob[PNV_PCI_DIAG_BUF_SIZE];
		struct OpalIoP7IOCPhbErrorData	p7ioc;
		struct OpalIoPhb3ErrorData	phb3;
		struct OpalIoP7IOCErrorData	hub_diag;
	} diag;

};
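
/*
 * Illustrative sketch only (not part of the original header): how the
 * reverse map and the DMA32 segment map above are typically consumed.
 * The helper names and the "invalid PE" marker value are assumptions made
 * for this example; the authoritative lookups live in the IODA PE
 * configuration code.
 */
static inline struct pnv_ioda_pe *pnv_example_bdfn_to_pe(struct pnv_phb *phb,
							 u8 bus, u8 devfn)
{
	/* pe_rmap[] translates a 16-bit { bus, devfn } RID to a PE number. */
	unsigned int pe_no = phb->ioda.pe_rmap[(bus << 8) | devfn];

	return &phb->ioda.pe_array[pe_no];
}

static inline bool pnv_example_dma32_seg_is_used(struct pnv_phb *phb,
						 unsigned int seg)
{
	/*
	 * dma32_segmap[] records, per DMA32 segment, the number of the PE
	 * that owns it (assumed to hold an all-ones "invalid PE" marker
	 * while the segment is free).
	 */
	return seg < phb->ioda.dma32_count &&
	       phb->ioda.dma32_segmap[seg] != (unsigned int)-1;
}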

extern struct pci_ops pnv_pci_ops;
extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction);
extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
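
/*
 * Illustrative sketch only (not part of the original header): building and
 * tearing down a single TCE with the helpers declared above. The index is
 * in TCE (IOMMU page) units of the given table; the helper names and the
 * NULL attrs argument are assumptions made for this example.
 */
static inline int pnv_example_map_one_page(struct iommu_table *tbl,
					   long index, void *buf)
{
	/* Point one TCE at 'buf' for bidirectional DMA. */
	return pnv_tce_build(tbl, index, 1, (unsigned long)buf,
			     DMA_BIDIRECTIONAL, NULL);
}

static inline void pnv_example_unmap_one_page(struct iommu_table *tbl,
					      long index)
{
	/* Clear the TCE again once the DMA has completed. */
	pnv_tce_free(tbl, index, 1);
}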

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff);
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val);
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val);
extern struct iommu_table *pnv_pci_table_alloc(int nid);

extern long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
				      void *tce_mem, u64 tce_size,
				      u64 dma_offset, unsigned page_shift);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
					__be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);

extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);

/* Nvlink functions */
extern void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe);
extern void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
				   struct iommu_table *tbl,
				   unsigned long index,
				   unsigned long npages,
				   bool rm);
extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled);
extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask);

#endif /* __POWERNV_PCI_H */