/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include "../include/obd_support.h"
#include "../include/obd_cksum.h"
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"

struct plain_sec {
	struct ptlrpc_sec	pls_base;
	rwlock_t		pls_lock;
	struct ptlrpc_cli_ctx	*pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
	return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops plain_ctx_ops;
static struct ptlrpc_svc_ctx plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * for simplicity, plain policy rpc uses a fixed layout.
 */
#define PLAIN_PACK_SEGMENTS	(4)

#define PLAIN_PACK_HDR_OFF	(0)
#define PLAIN_PACK_MSG_OFF	(1)
#define PLAIN_PACK_USER_OFF	(2)
#define PLAIN_PACK_BULK_OFF	(3)

#define PLAIN_FL_USER		(0x01)
#define PLAIN_FL_BULK		(0x02)

struct plain_header {
	__u8 ph_ver;		/* 0 */
	__u8 ph_flags;
	__u8 ph_sp;		/* source */
	__u8 ph_bulk_hash_alg;	/* complete flavor desc */
	__u8 ph_pad[4];
};

struct plain_bulk_token {
	__u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
	(sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

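/*
 * Fixed wire layout of a plain RPC message (PLAIN_PACK_SEGMENTS buffers):
 *
 *   segment 0 (PLAIN_PACK_HDR_OFF):  struct plain_header
 *   segment 1 (PLAIN_PACK_MSG_OFF):  the embedded lustre message
 *   segment 2 (PLAIN_PACK_USER_OFF): user descriptor, if PLAIN_FL_USER
 *   segment 3 (PLAIN_PACK_BULK_OFF): ptlrpc_bulk_sec_desc followed by a
 *                                    plain_bulk_token, if PLAIN_FL_BULK
 */
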
/****************************************
 * bulk checksum helpers                *
 ****************************************/

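/*
 * Swab and sanity-check the bulk security descriptor carried in segment
 * PLAIN_PACK_BULK_OFF. Only the NULL and INTG bulk services are valid
 * for the plain policy.
 */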
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;

	if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
		return -EPROTO;

	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
	if (!bsd) {
		CERROR("bulk sec desc has short size %d\n",
		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
		return -EPROTO;
	}

	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
	    bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
		CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}

static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
				    __u8 hash_alg,
				    struct plain_bulk_token *token)
{
	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
	return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
					 sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
				  __u8 hash_alg,
				  struct plain_bulk_token *tokenr)
{
	struct plain_bulk_token tokenv;
	int rc;

	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
	rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
				       sizeof(tokenv.pbt_hash));
	if (rc)
		return rc;

	if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
		return -EACCES;
	return 0;
}

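/*
 * Fault-injection helper: flip one bit in the first non-empty page of
 * the bulk descriptor so that checksum verification on the peer fails.
 */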
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
	char *ptr;
	unsigned int off, i;

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].bv_len == 0)
			continue;

		ptr = kmap(desc->bd_iov[i].bv_page);
		off = desc->bd_iov[i].bv_offset & ~PAGE_MASK;
		ptr[off] ^= 0x1;
		kunmap(desc->bd_iov[i].bv_page);
		return;
	}
}

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	/* should never reach here */
	LBUG();
	return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
	return 0;
}

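/*
 * "Sign" an outgoing request: the plain policy adds no cryptographic
 * protection here, it only fills in the plain_header and records the
 * packed request length.
 */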
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_sp = ctx->cc_sec->ps_part;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_udesc)
		phdr->ph_flags |= PLAIN_FL_USER;
	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
						 msg->lm_buflens);
	return 0;
}

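/*
 * Verify an incoming reply: check the segment count and plain_header,
 * and for early replies verify the CRC32 checksum computed over the
 * embedded message.
 */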
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_repdata;
	struct plain_header *phdr;
	__u32 cksum;
	int swabbed;

	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
		return -EPROTO;
	}

	swabbed = ptlrpc_rep_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (!phdr) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	/* expect no user desc in reply */
	if (phdr->ph_flags & PLAIN_FL_USER) {
		CERROR("Unexpected udesc flag in reply\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
		CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
		       req->rq_flvr.u_bulk.hash.hash_alg);
		return -EPROTO;
	}

	if (unlikely(req->rq_early)) {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
						      0),
				       lustre_msg_buflen(msg,
							 PLAIN_PACK_MSG_OFF),
				       NULL, 0, (unsigned char *)&cksum,
				       &hsize);
		if (cksum != msg->lm_cksum) {
			CDEBUG(D_SEC,
			       "early reply checksum mismatch: %08x != %08x\n",
			       cpu_to_le32(cksum), msg->lm_cksum);
			return -EINVAL;
		}
	} else {
		/* whether we sent with bulk or not, we expect the same
		 * in reply, except for early reply
		 */
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1,
			  phdr->ph_flags & PLAIN_FL_BULK)) {
			CERROR("%s bulk checksum in reply\n",
			       req->rq_pack_bulk ? "Missing" : "Unexpected");
			return -EPROTO;
		}

		if (phdr->ph_flags & PLAIN_FL_BULK) {
			if (plain_unpack_bsd(msg, swabbed))
				return -EPROTO;
		}
	}

	req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
	return 0;
}

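/*
 * Client side: fill in the bulk security descriptor of a request. For
 * bulk writes with an integrity flavor, compute the checksum over the
 * data pages; reads are checksummed by the server and verified later in
 * plain_cli_unwrap_bulk().
 */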
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	struct plain_bulk_token *token;
	int rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	token = (struct plain_bulk_token *)bsd->bsd_data;

	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	if (req->rq_bulk_read)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      token);
	if (rc) {
		CERROR("bulk write: failed to compute checksum: %d\n", rc);
	} else {
		/*
		 * for sending we only compute the wrong checksum instead
		 * of corrupting the data so it is still correct on a redo
		 */
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
		    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
			token->pbt_hash[0] ^= 0x1;
	}

	return rc;
}

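/*
 * Client side: check the bulk reply. For writes, just propagate a
 * server-side verification error; for reads, clip the iov lengths to
 * what was actually transferred and verify the checksum sent back by
 * the server.
 */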
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;
	int i, nob;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR)
			return -EIO;
		return 0;
	}

	/* fix the actual data size */
	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) {
			desc->bd_iov[i].bv_len =
				desc->bd_nob_transferred - nob;
		}
		nob += desc->bd_iov[i].bv_len;
	}

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenv);
	if (rc)
		CERROR("bulk read: client verify failed: %d\n", rc);

	return rc;
}

/****************************************
 * sec apis                             *
 ****************************************/

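/*
 * The plain policy keeps a single shared client context per sec.
 * Install it under pls_lock, allocating a new one only if none is
 * cached yet. Returns the context with a reference held for the
 * caller, or NULL on allocation failure.
 */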
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
	struct ptlrpc_cli_ctx *ctx, *ctx_new;

	ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS);

	write_lock(&plsec->pls_lock);

	ctx = plsec->pls_ctx;
	if (ctx) {
		atomic_inc(&ctx->cc_refcount);

		kfree(ctx_new);
	} else if (ctx_new) {
		ctx = ctx_new;

		atomic_set(&ctx->cc_refcount, 1);	/* for cache */
		ctx->cc_sec = &plsec->pls_base;
		ctx->cc_ops = &plain_ctx_ops;
		ctx->cc_expire = 0;
		ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
		ctx->cc_vcred.vc_uid = 0;
		spin_lock_init(&ctx->cc_lock);
		INIT_LIST_HEAD(&ctx->cc_req_list);
		INIT_LIST_HEAD(&ctx->cc_gc_chain);

		plsec->pls_ctx = ctx;
		atomic_inc(&plsec->pls_base.ps_nctx);
		atomic_inc(&plsec->pls_base.ps_refcount);

		atomic_inc(&ctx->cc_refcount);	/* for caller */
	}

	write_unlock(&plsec->pls_lock);

	return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
	struct plain_sec *plsec = sec2plsec(sec);

	LASSERT(sec->ps_policy == &plain_policy);
	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(!plsec->pls_ctx);

	class_import_put(sec->ps_import);

	kfree(plsec);
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}

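/*
 * Create a plain security instance for an import. For a reverse
 * security (svc_ctx != NULL) the shared context is installed
 * immediately rather than on first lookup.
 */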
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
				    struct ptlrpc_svc_ctx *svc_ctx,
				    struct sptlrpc_flavor *sf)
{
	struct plain_sec *plsec;
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;

	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

	plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
	if (!plsec)
		return NULL;

	/*
	 * initialize plain_sec
	 */
	rwlock_init(&plsec->pls_lock);
	plsec->pls_ctx = NULL;

	sec = &plsec->pls_base;
	sec->ps_policy = &plain_policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_import = class_import_get(imp);
	sec->ps_flvr = *sf;
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);
	sec->ps_gc_interval = 0;
	sec->ps_gc_next = 0;

	/* install ctx immediately if this is a reverse sec */
	if (svc_ctx) {
		ctx = plain_sec_install_ctx(plsec);
		if (!ctx) {
			plain_destroy_sec(sec);
			return NULL;
		}
		sptlrpc_cli_ctx_put(ctx, 1);
	}

	return sec;
}

static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
					struct vfs_cred *vcred,
					int create, int remove_dead)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	read_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	if (ctx)
		atomic_inc(&ctx->cc_refcount);
	read_unlock(&plsec->pls_lock);

	if (unlikely(!ctx))
		ctx = plain_sec_install_ctx(plsec);

	return ctx;
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
		       struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	kfree(ctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
			  uid_t uid, int grace, int force)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	/* do nothing unless caller wants to flush for 'all' */
	if (uid != -1)
		return 0;

	write_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	plsec->pls_ctx = NULL;
	write_unlock(&plsec->pls_lock);

	if (ctx)
		sptlrpc_cli_ctx_put(ctx, 1);
	return 0;
}

static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_udesc)
		buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	if (!req->rq_reqbuf) {
		LASSERT(!req->rq_pool);

		alloc_len = size_roundup_power2(alloc_len);
		req->rq_reqbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
		if (!req->rq_reqbuf)
			return -ENOMEM;

		req->rq_reqbuf_len = alloc_len;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= alloc_len);
		memset(req->rq_reqbuf, 0, alloc_len);
	}

	lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

	if (req->rq_pack_udesc) {
		int rc = sptlrpc_pack_user_desc(req->rq_reqbuf,
						PLAIN_PACK_USER_OFF);

		if (rc < 0)
			return rc;
	}

	return 0;
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	if (!req->rq_pool) {
		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = NULL;
		req->rq_reqbuf_len = 0;
	}
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	/* add space for early reply */
	alloc_len += plain_at_offset;

	alloc_len = size_roundup_power2(alloc_len);

	req->rq_repbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = alloc_len;
	return 0;
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	kvfree(req->rq_repbuf);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
}

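/*
 * Grow one segment of an already-packed request. The new sizes of the
 * embedded and wrapper messages are computed first; if the current
 * buffer is too small, a bigger one is allocated and the old content
 * copied over under imp_lock (see LU-3333 below).
 */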
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
			 struct ptlrpc_request *req,
			 int segment, int newsize)
{
	struct lustre_msg *newbuf;
	int oldsize;
	int newmsg_size, newbuf_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
		req->rq_reqmsg);

	/* compute new embedded msg size. */
	oldsize = req->rq_reqmsg->lm_buflens[segment];
	req->rq_reqmsg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
					 req->rq_reqmsg->lm_buflens);
	req->rq_reqmsg->lm_buflens[segment] = oldsize;

	/* compute new wrapper msg size. */
	oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
	newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
					 req->rq_reqbuf->lm_buflens);
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
		if (!newbuf)
			return -ENOMEM;

		/* Must lock this, so that otherwise unprotected change of
		 * rq_reqmsg is not racing with parallel processing of
		 * imp_replay_list traversing threads. See LU-3333
		 * This is a bandaid at best, we really need to deal with this
		 * in request enlarging code before unpacking that's already
		 * there
		 */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
						PLAIN_PACK_MSG_OFF, 0);

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	return 0;
}

/****************************************
 * service apis                         *
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
	.sc_refcount	= ATOMIC_INIT(1),
	.sc_policy	= &plain_policy,
};

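/*
 * Server side: accept an incoming plain request. Validates the flavor,
 * buffer count and plain_header, then unpacks the optional user and
 * bulk descriptors and exposes the embedded message via rq_reqmsg.
 */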
static
int plain_accept(struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;
	int swabbed;

	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
		SPTLRPC_POLICY_PLAIN);

	if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
	    SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
		CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return SECSVC_DROP;
	}

	if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
		return SECSVC_DROP;
	}

	swabbed = ptlrpc_req_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (!phdr) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
		CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
		return -EPROTO;
	}

	req->rq_sp_from = phdr->ph_sp;
	req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

	if (phdr->ph_flags & PLAIN_FL_USER) {
		if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
					     swabbed)) {
			CERROR("Mal-formed user descriptor\n");
			return SECSVC_DROP;
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
	}

	if (phdr->ph_flags & PLAIN_FL_BULK) {
		if (plain_unpack_bsd(msg, swabbed))
			return SECSVC_DROP;

		req->rq_pack_bulk = 1;
	}

	req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

	req->rq_svc_ctx = &plain_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);

	return SECSVC_OK;
}

static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state *rs;
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rs_size = sizeof(*rs);

	LASSERT(msgsize % 8 == 0);

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

	rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
		if (!rs)
			return -ENOMEM;

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);
	rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);

	lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

	req->rq_reply_state = rs;
	return 0;
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
	atomic_dec(&rs->rs_svc_ctx->sc_refcount);

	if (!rs->rs_prealloc)
		kvfree(rs);
}

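/*
 * Server side: pack the reply. Shrinks the message segment to the
 * actual reply length; replies that are not packed into the final
 * buffer (i.e. early replies) get a CRC32 checksum in lm_cksum so the
 * client can validate them in plain_ctx_verify().
 */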
static
int plain_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct lustre_msg_v2 *msg = rs->rs_repbuf;
	struct plain_header *phdr;
	int len;

	LASSERT(rs);
	LASSERT(msg);

	if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
		len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
					req->rq_replen, 1);
	else
		len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	rs->rs_repdata_len = len;

	if (likely(req->rq_packed_final)) {
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = plain_at_offset;
		else
			req->rq_reply_off = 0;
	} else {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
						      0),
				       lustre_msg_buflen(msg,
							 PLAIN_PACK_MSG_OFF),
				       NULL, 0, (unsigned char *)&msg->lm_cksum,
				       &hsize);
		req->rq_reply_off = 0;
	}

	return 0;
}

static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenr;
	int rc;

	LASSERT(req->rq_bulk_write);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenr);
	if (rc) {
		bsdv->bsd_flags |= BSD_FL_ERR;
		CERROR("bulk write: server verify failed: %d\n", rc);
	}

	return rc;
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;

	LASSERT(req->rq_bulk_read);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      tokenv);
	if (rc) {
		CERROR("bulk read: server failed to compute checksum: %d\n",
		       rc);
	} else {
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
			corrupt_bulk_data(desc);
	}

	return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
	.refresh		= plain_ctx_refresh,
	.validate		= plain_ctx_validate,
	.sign			= plain_ctx_sign,
	.verify			= plain_ctx_verify,
	.wrap_bulk		= plain_cli_wrap_bulk,
	.unwrap_bulk		= plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
	.create_sec		= plain_create_sec,
	.destroy_sec		= plain_destroy_sec,
	.kill_sec		= plain_kill_sec,
	.lookup_ctx		= plain_lookup_ctx,
	.release_ctx		= plain_release_ctx,
	.flush_ctx_cache	= plain_flush_ctx_cache,
	.alloc_reqbuf		= plain_alloc_reqbuf,
	.free_reqbuf		= plain_free_reqbuf,
	.alloc_repbuf		= plain_alloc_repbuf,
	.free_repbuf		= plain_free_repbuf,
	.enlarge_reqbuf		= plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
	.accept			= plain_accept,
	.alloc_rs		= plain_alloc_rs,
	.authorize		= plain_authorize,
	.free_rs		= plain_free_rs,
	.unwrap_bulk		= plain_svc_unwrap_bulk,
	.wrap_bulk		= plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "plain",
	.sp_policy		= SPTLRPC_POLICY_PLAIN,
	.sp_cops		= &plain_sec_cops,
	.sp_sops		= &plain_sec_sops,
};

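/*
 * Module setup: compute the early-reply offset (the size of a plain
 * message wrapping an early-sized embedded message) and register the
 * policy with sptlrpc.
 */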
int sptlrpc_plain_init(void)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rc;

	buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
	plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rc = sptlrpc_register_policy(&plain_policy);
	if (rc)
		CERROR("failed to register: %d\n", rc);

	return rc;
}

void sptlrpc_plain_fini(void)
{
	int rc;

	rc = sptlrpc_unregister_policy(&plain_policy);
	if (rc)
		CERROR("cannot unregister: %d\n", rc);
}