/*
 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "gk20a.h"
#include "ctxgf100.h"

#include <subdev/timer.h>

#include <nvif/class.h>
/* Free a register-init pack built by one of the gk20a_gr_*_to_* helpers.
 *
 * The pack and its trailing init array were allocated as a single vzalloc()
 * block, so one vfree() releases everything.  The ctor stores ERR_PTR()
 * values into gr->fuc_* on conversion failure before returning, and the
 * dtor passes those fields straight here, so guard against error pointers
 * (and NULL) before freeing.
 */
static void
gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
{
	if (!IS_ERR_OR_NULL(pack))
		vfree(pack);
}
41 static struct gf100_gr_pack
*
42 gk20a_gr_av_to_init(struct gf100_gr_fuc
*fuc
)
44 struct gf100_gr_init
*init
;
45 struct gf100_gr_pack
*pack
;
46 const int nent
= (fuc
->size
/ sizeof(struct gk20a_fw_av
));
49 pack
= vzalloc((sizeof(*pack
) * 2) + (sizeof(*init
) * (nent
+ 1)));
51 return ERR_PTR(-ENOMEM
);
53 init
= (void *)(pack
+ 2);
57 for (i
= 0; i
< nent
; i
++) {
58 struct gf100_gr_init
*ent
= &init
[i
];
59 struct gk20a_fw_av
*av
= &((struct gk20a_fw_av
*)fuc
->data
)[i
];
77 static struct gf100_gr_pack
*
78 gk20a_gr_aiv_to_init(struct gf100_gr_fuc
*fuc
)
80 struct gf100_gr_init
*init
;
81 struct gf100_gr_pack
*pack
;
82 const int nent
= (fuc
->size
/ sizeof(struct gk20a_fw_aiv
));
85 pack
= vzalloc((sizeof(*pack
) * 2) + (sizeof(*init
) * (nent
+ 1)));
87 return ERR_PTR(-ENOMEM
);
89 init
= (void *)(pack
+ 2);
93 for (i
= 0; i
< nent
; i
++) {
94 struct gf100_gr_init
*ent
= &init
[i
];
95 struct gk20a_fw_aiv
*av
= &((struct gk20a_fw_aiv
*)fuc
->data
)[i
];
106 static struct gf100_gr_pack
*
107 gk20a_gr_av_to_method(struct gf100_gr_fuc
*fuc
)
109 struct gf100_gr_init
*init
;
110 struct gf100_gr_pack
*pack
;
111 /* We don't suppose we will initialize more than 16 classes here... */
112 static const unsigned int max_classes
= 16;
113 const int nent
= (fuc
->size
/ sizeof(struct gk20a_fw_av
));
117 pack
= vzalloc((sizeof(*pack
) * max_classes
) +
118 (sizeof(*init
) * (nent
+ 1)));
120 return ERR_PTR(-ENOMEM
);
122 init
= (void *)(pack
+ max_classes
);
124 for (i
= 0; i
< nent
; i
++) {
125 struct gf100_gr_init
*ent
= &init
[i
];
126 struct gk20a_fw_av
*av
= &((struct gk20a_fw_av
*)fuc
->data
)[i
];
127 u32
class = av
->addr
& 0xffff;
128 u32 addr
= (av
->addr
& 0xffff0000) >> 14;
130 if (prevclass
!= class) {
131 pack
[classidx
].init
= ent
;
132 pack
[classidx
].type
= class;
134 if (++classidx
>= max_classes
) {
136 return ERR_PTR(-ENOSPC
);
141 ent
->data
= av
->data
;
150 gk20a_gr_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
151 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
152 struct nvkm_object
**pobject
)
156 struct gf100_gr_fuc fuc
;
158 err
= gf100_gr_ctor(parent
, engine
, oclass
, data
, size
, pobject
);
162 gr
= (void *)*pobject
;
164 err
= gf100_gr_ctor_fw(gr
, "sw_nonctx", &fuc
);
167 gr
->fuc_sw_nonctx
= gk20a_gr_av_to_init(&fuc
);
168 gf100_gr_dtor_fw(&fuc
);
169 if (IS_ERR(gr
->fuc_sw_nonctx
))
170 return PTR_ERR(gr
->fuc_sw_nonctx
);
172 err
= gf100_gr_ctor_fw(gr
, "sw_ctx", &fuc
);
175 gr
->fuc_sw_ctx
= gk20a_gr_aiv_to_init(&fuc
);
176 gf100_gr_dtor_fw(&fuc
);
177 if (IS_ERR(gr
->fuc_sw_ctx
))
178 return PTR_ERR(gr
->fuc_sw_ctx
);
180 err
= gf100_gr_ctor_fw(gr
, "sw_bundle_init", &fuc
);
183 gr
->fuc_bundle
= gk20a_gr_av_to_init(&fuc
);
184 gf100_gr_dtor_fw(&fuc
);
185 if (IS_ERR(gr
->fuc_bundle
))
186 return PTR_ERR(gr
->fuc_bundle
);
188 err
= gf100_gr_ctor_fw(gr
, "sw_method_init", &fuc
);
191 gr
->fuc_method
= gk20a_gr_av_to_method(&fuc
);
192 gf100_gr_dtor_fw(&fuc
);
193 if (IS_ERR(gr
->fuc_method
))
194 return PTR_ERR(gr
->fuc_method
);
200 gk20a_gr_dtor(struct nvkm_object
*object
)
202 struct gf100_gr
*gr
= (void *)object
;
204 gk20a_gr_init_dtor(gr
->fuc_method
);
205 gk20a_gr_init_dtor(gr
->fuc_bundle
);
206 gk20a_gr_init_dtor(gr
->fuc_sw_ctx
);
207 gk20a_gr_init_dtor(gr
->fuc_sw_nonctx
);
209 gf100_gr_dtor(object
);
213 gk20a_gr_wait_mem_scrubbing(struct gf100_gr
*gr
)
215 struct nvkm_subdev
*subdev
= &gr
->base
.engine
.subdev
;
216 struct nvkm_device
*device
= subdev
->device
;
218 if (nvkm_msec(device
, 2000,
219 if (!(nvkm_rd32(device
, 0x40910c) & 0x00000006))
222 nvkm_error(subdev
, "FECS mem scrubbing timeout\n");
226 if (nvkm_msec(device
, 2000,
227 if (!(nvkm_rd32(device
, 0x41a10c) & 0x00000006))
230 nvkm_error(subdev
, "GPCCS mem scrubbing timeout\n");
238 gk20a_gr_set_hww_esr_report_mask(struct gf100_gr
*gr
)
240 struct nvkm_device
*device
= gr
->base
.engine
.subdev
.device
;
241 nvkm_wr32(device
, 0x419e44, 0x1ffffe);
242 nvkm_wr32(device
, 0x419e4c, 0x7f);
246 gk20a_gr_init(struct nvkm_object
*object
)
248 struct gk20a_gr_oclass
*oclass
= (void *)object
->oclass
;
249 struct gf100_gr
*gr
= (void *)object
;
250 struct nvkm_device
*device
= gr
->base
.engine
.subdev
.device
;
251 const u32 magicgpc918
= DIV_ROUND_UP(0x00800000, gr
->tpc_total
);
252 u32 data
[TPC_MAX
/ 8] = {};
257 ret
= nvkm_gr_init(&gr
->base
);
262 nvkm_wr32(device
, 0x40802c, 0x1);
264 gf100_gr_mmio(gr
, gr
->fuc_sw_nonctx
);
266 ret
= gk20a_gr_wait_mem_scrubbing(gr
);
270 ret
= gf100_gr_wait_idle(gr
);
274 /* MMU debug buffer */
275 nvkm_wr32(device
, 0x100cc8, nvkm_memory_addr(gr
->unk4188b4
) >> 8);
276 nvkm_wr32(device
, 0x100ccc, nvkm_memory_addr(gr
->unk4188b8
) >> 8);
278 if (oclass
->init_gpc_mmu
)
279 oclass
->init_gpc_mmu(gr
);
281 /* Set the PE as stream master */
282 nvkm_mask(device
, 0x503018, 0x1, 0x1);
285 memset(data
, 0x00, sizeof(data
));
286 memcpy(tpcnr
, gr
->tpc_nr
, sizeof(gr
->tpc_nr
));
287 for (i
= 0, gpc
= -1; i
< gr
->tpc_total
; i
++) {
289 gpc
= (gpc
+ 1) % gr
->gpc_nr
;
290 } while (!tpcnr
[gpc
]);
291 tpc
= gr
->tpc_nr
[gpc
] - tpcnr
[gpc
]--;
293 data
[i
/ 8] |= tpc
<< ((i
% 8) * 4);
296 nvkm_wr32(device
, GPC_BCAST(0x0980), data
[0]);
297 nvkm_wr32(device
, GPC_BCAST(0x0984), data
[1]);
298 nvkm_wr32(device
, GPC_BCAST(0x0988), data
[2]);
299 nvkm_wr32(device
, GPC_BCAST(0x098c), data
[3]);
301 for (gpc
= 0; gpc
< gr
->gpc_nr
; gpc
++) {
302 nvkm_wr32(device
, GPC_UNIT(gpc
, 0x0914),
303 gr
->magic_not_rop_nr
<< 8 | gr
->tpc_nr
[gpc
]);
304 nvkm_wr32(device
, GPC_UNIT(gpc
, 0x0910), 0x00040000 |
306 nvkm_wr32(device
, GPC_UNIT(gpc
, 0x0918), magicgpc918
);
309 nvkm_wr32(device
, GPC_BCAST(0x3fd4), magicgpc918
);
311 /* Enable FIFO access */
312 nvkm_wr32(device
, 0x400500, 0x00010001);
314 /* Enable interrupts */
315 nvkm_wr32(device
, 0x400100, 0xffffffff);
316 nvkm_wr32(device
, 0x40013c, 0xffffffff);
318 /* Enable FECS error interrupts */
319 nvkm_wr32(device
, 0x409c24, 0x000f0000);
321 /* Enable hardware warning exceptions */
322 nvkm_wr32(device
, 0x404000, 0xc0000000);
323 nvkm_wr32(device
, 0x404600, 0xc0000000);
325 if (oclass
->set_hww_esr_report_mask
)
326 oclass
->set_hww_esr_report_mask(gr
);
328 /* Enable TPC exceptions per GPC */
329 nvkm_wr32(device
, 0x419d0c, 0x2);
330 nvkm_wr32(device
, 0x41ac94, (((1 << gr
->tpc_total
) - 1) & 0xff) << 16);
332 /* Reset and enable all exceptions */
333 nvkm_wr32(device
, 0x400108, 0xffffffff);
334 nvkm_wr32(device
, 0x400138, 0xffffffff);
335 nvkm_wr32(device
, 0x400118, 0xffffffff);
336 nvkm_wr32(device
, 0x400130, 0xffffffff);
337 nvkm_wr32(device
, 0x40011c, 0xffffffff);
338 nvkm_wr32(device
, 0x400134, 0xffffffff);
340 gf100_gr_zbc_init(gr
);
342 return gf100_gr_init_ctxctl(gr
);
345 static const struct gf100_gr_func
347 .grctx
= &gk20a_grctx
,
349 { -1, -1, FERMI_TWOD_A
},
350 { -1, -1, KEPLER_INLINE_TO_MEMORY_A
},
351 { -1, -1, KEPLER_C
, &gf100_fermi
},
352 { -1, -1, KEPLER_COMPUTE_A
},
358 gk20a_gr_oclass
= &(struct gk20a_gr_oclass
) {
360 .base
.handle
= NV_ENGINE(GR
, 0xea),
361 .base
.ofuncs
= &(struct nvkm_ofuncs
) {
362 .ctor
= gk20a_gr_ctor
,
363 .dtor
= gk20a_gr_dtor
,
364 .init
= gk20a_gr_init
,
365 .fini
= _nvkm_gr_fini
,
370 .set_hww_esr_report_mask
= gk20a_gr_set_hww_esr_report_mask
,