/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}
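	/*
	 * Illustrative sketch of the frame laid out above (offsets relative
	 * to the old r1; the exact BPF_PPC_STACKFRAME size comes from
	 * bpf_jit.h):
	 *
	 *   old r1 + 16           LR save slot (only if SEEN_DATAREF)
	 *   old r1 - 8*(32-r)     save slot for each non-volatile reg r
	 *                         (r_D, r_HL and any M[] registers used)
	 *   new r1 = old r1 - BPF_PPC_STACKFRAME   (set by the stdu)
	 */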

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
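/*
 * For example, CHOOSE_LOAD_FUNC(K, sk_load_word) picks
 * sk_load_word_positive_offset for K >= 0,
 * sk_load_word_negative_offset for SKF_LL_OFF <= K < 0, and the plain
 * sk_load_word helper (which sorts out the offset at run time) when K
 * is more negative than SKF_LL_OFF.
 */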

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_scratch1, r_A, r_X);
			PPC_MUL(r_scratch1, r_X, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
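			/*
			 * E.g. for "jgt #7, jt 0, jf 3", the true path is the
			 * fallthrough, so the bcc emitted below tests the
			 * inverted condition and jumps straight to the jf
			 * target instead.
			 */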
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */
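	/*
	 * To put numbers on the above: a worst-case 4096-insn filter at
	 * 8 bytes per BPF insn is already 32 KB of generated code, right at
	 * the edge of the +/- 32 KB conditional-branch range, and several
	 * opcodes expand to more than two words, hence the long-jump form.
	 */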

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
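		/*
		 * On this ABI a function pointer refers to a small descriptor
		 * (entry address plus TOC pointer) rather than to the code
		 * itself, so fp->bpf_func points at 'image' (the descriptor)
		 * while the opcodes live at 'code_base'.
		 */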
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
		fp->jited = true;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);

	bpf_prog_unlock_free(fp);
}