/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include "bpf_jit.h"

#ifndef __BIG_ENDIAN
/* There are endianness assumptions herein. */
#error "Little-endian PPC not supported in BPF compiler"
#endif

/*
 * Toggled via the net.core.bpf_jit_enable sysctl: 0 = off, 1 = on,
 * > 1 also hex-dumps the generated image (see bpf_jit_compile() below).
 */
int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	/* Order the stores of the code image before it is made executable. */
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

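/*
 * Register conventions (the r_* definitions live in bpf_jit.h): r_skb
 * is the incoming skb and r_ret the return value, both in r3 per the
 * ABI; r_A and r_X hold the filter's A and X; r_D (skb->data), r_HL
 * (the linear "headlen") and the sixteen M[] slots (r_M..r_M+15) are
 * kept in non-volatile regs so they survive calls to the load helpers.
 */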
static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r_M..r_M+15 as some will
			 * be used for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 * r_HL = skb->len - skb->data_len
		 * r_D = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_S_RET_K:
	case BPF_S_LD_W_LEN:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_QUEUE:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}
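
/*
 * A sketch of the frame the prologue builds when one is needed (offsets
 * relative to the caller's r1; BPF_PPC_STACKFRAME is from bpf_jit.h):
 *
 *	caller r1 + 16          LR save slot (standard ABI location)
 *	caller r1 - 8*(32-r)    save slot for each non-volatile GPR r used
 *	new r1 = caller r1 - BPF_PPC_STACKFRAME
 */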

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

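/*
 * The sk_load_{word,half,byte,byte_msh} helpers (asm, in bpf_jit_64.S)
 * come in three flavours: a positive-offset fast path, a negative-offset
 * path for the ancillary SKF_*_OFF ranges, and a generic entry that
 * tests the offset at run time.  For absolute loads K is known at
 * compile time, so the right flavour can be chosen here once instead of
 * being tested on every packet.
 */
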
/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (filter[i].code) {
		/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
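			/*
			 * A 32-bit immediate may take two instructions:
			 * addi sign-extends its 16-bit operand, so the
			 * high half is added "high-adjusted" (IMM_HA) to
			 * compensate for that sign extension.
			 */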
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; the first pass hits here
				 * (longer worst-case code size).  The short
				 * branch hops over the two-instruction
				 * li/jmp "return 0" sequence, i.e. +12 bytes
				 * from the branch itself.
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
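			/*
			 * sk_chk_filter() has already replaced K with
			 * reciprocal_value(K) for this opcode, so the
			 * divide becomes a multiply-high.
			 */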
			PPC_LI32(r_scratch1, K);
			/* Top 32 bits of 64bit result -> A */
			PPC_MULHWU(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_XOR_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine
				 * the code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_RET_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

		/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LD_W_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

		/*** Ancillary info loads ***/

		/* None of the BPF_S_ANC* codes appear to be passed by
		 * sk_chk_filter(). The interpreter and the x86 BPF
		 * compiler implement them so we do too -- they may be
		 * planted in future.
		 */
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  protocol));
			/* ntohs is a NOP with BE loads. */
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

		/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
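			/*
			 * X = ((P[K:1]) & 0xf) << 2 (the IP header length
			 * trick); the helper writes r_X itself, so the
			 * common absolute-load tail is reused unchanged.
			 */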
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;

		/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches
	 * padded with a NOP); this is less efficient, but assembly will
	 * always complete after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
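	/*
	 * Allocate at least a work_struct's worth: bpf_jit_free() later
	 * overlays a work_struct on the image to defer module_free() to
	 * process context.
	 */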
	image = module_alloc(max_t(unsigned int, alloclen,
				   sizeof(struct work_struct)));
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		pr_info("flen=%d proglen=%u pass=%d image=%p\n",
			flen, proglen, pass, image);

	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ",
				       DUMP_PREFIX_ADDRESS,
				       16, 1, code_base,
				       proglen, false);

		bpf_flush_icache(code_base, code_base + (proglen/4));
		/*
		 * Function descriptor nastiness: a PPC64 function pointer
		 * points at a descriptor (entry address + TOC pointer)
		 * rather than at the code itself.
		 */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* Run from softirq; we must use a work_struct to call
 * module_free() from process context.
 */
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}