crypto: nx - simplify pSeries nx842 driver
drivers/crypto/nx/nx-842-pseries.c
1 /*
2 * Driver for IBM Power 842 compression accelerator
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 * Copyright (C) IBM Corporation, 2012
19 *
20 * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
21 * Seth Jennings <sjenning@linux.vnet.ibm.com>
22 */
23
24 #include <asm/vio.h>
25
26 #include "nx-842.h"
27 #include "nx_csbcpb.h" /* struct nx_csbcpb */
28
29 #define MODULE_NAME NX842_PSERIES_MODULE_NAME
30 MODULE_LICENSE("GPL");
31 MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
32 MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
33
34 /* IO buffer must be 128 byte aligned */
35 #define IO_BUFFER_ALIGN 128
36
37 static struct nx842_constraints nx842_pseries_constraints = {
38 .alignment = IO_BUFFER_ALIGN,
39 .multiple = DDE_BUFFER_LAST_MULT,
40 .minimum = IO_BUFFER_ALIGN,
41 .maximum = PAGE_SIZE, /* dynamic, max_sync_size */
42 };
43
44 static int check_constraints(unsigned long buf, unsigned int *len, bool in)
45 {
46 if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
47 pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
48 in ? "input" : "output", buf,
49 nx842_pseries_constraints.alignment);
50 return -EINVAL;
51 }
52 if (*len % nx842_pseries_constraints.multiple) {
53 pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
54 in ? "input" : "output", *len,
55 nx842_pseries_constraints.multiple);
56 if (in)
57 return -EINVAL;
58 *len = round_down(*len, nx842_pseries_constraints.multiple);
59 }
60 if (*len < nx842_pseries_constraints.minimum) {
61 pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
62 in ? "input" : "output", *len,
63 nx842_pseries_constraints.minimum);
64 return -EINVAL;
65 }
66 if (*len > nx842_pseries_constraints.maximum) {
67 pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
68 in ? "input" : "output", *len,
69 nx842_pseries_constraints.maximum);
70 if (in)
71 return -EINVAL;
72 *len = nx842_pseries_constraints.maximum;
73 }
74 return 0;
75 }
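/*
 * Editor's illustration (not part of the driver): how the constraints above
 * shape an output buffer.  Values mirror the static initializer; the last-DDE
 * multiple is assumed to be 8 here (DDE_BUFFER_LAST_MULT comes from nx-842.h).
 */
#include <stdio.h>

int main(void)
{
	unsigned int alignment = 128, multiple = 8, minimum = 128, maximum = 4096;
	unsigned long buf = 0x10080;	/* 128-byte aligned start address */
	unsigned int len = 5000;	/* multiple of 8, but over the maximum */

	if (buf & (alignment - 1))
		return 1;		/* misaligned buffers are rejected */
	if (len % multiple)
		len -= len % multiple;	/* output lengths are rounded down */
	if (len < minimum)
		return 1;		/* too-short buffers are rejected */
	if (len > maximum)
		len = maximum;		/* output lengths are clamped */
	printf("usable output length: %u\n", len);	/* prints 4096 */
	return 0;
}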
76
77 /* I assume we need to align the CSB? */
78 #define WORKMEM_ALIGN (256)
79
80 struct nx842_workmem {
81 /* scatterlist */
82 char slin[4096];
83 char slout[4096];
84 /* coprocessor status/parameter block */
85 struct nx_csbcpb csbcpb;
86
87 char padding[WORKMEM_ALIGN];
88 } __aligned(WORKMEM_ALIGN);
89
90 /* Macros for fields within nx_csbcpb */
91 /* Check the valid bit within the csbcpb valid field */
92 #define NX842_CSBCPB_VALID_CHK(x) (x & BIT_MASK(7))
93
94 /* CE macros operate on the completion_extension field bits in the csbcpb.
95 * CE0 0=full completion, 1=partial completion
96 * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
97 * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
98 #define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7))
99 #define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6))
100 #define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5))
101
102 /* The NX unit accepts data only on 4K page boundaries */
103 #define NX842_HW_PAGE_SIZE (4096)
104 #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1))
105
106 enum nx842_status {
107 UNAVAILABLE,
108 AVAILABLE
109 };
110
111 struct ibm_nx842_counters {
112 atomic64_t comp_complete;
113 atomic64_t comp_failed;
114 atomic64_t decomp_complete;
115 atomic64_t decomp_failed;
116 atomic64_t swdecomp;
117 atomic64_t comp_times[32];
118 atomic64_t decomp_times[32];
119 };
120
121 static struct nx842_devdata {
122 struct vio_dev *vdev;
123 struct device *dev;
124 struct ibm_nx842_counters *counters;
125 unsigned int max_sg_len;
126 unsigned int max_sync_size;
127 unsigned int max_sync_sg;
128 enum nx842_status status;
129 } __rcu *devdata;
130 static DEFINE_SPINLOCK(devdata_mutex);
131
132 #define NX842_COUNTER_INC(_x) \
133 static inline void nx842_inc_##_x( \
134 const struct nx842_devdata *dev) { \
135 if (dev) \
136 atomic64_inc(&dev->counters->_x); \
137 }
138 NX842_COUNTER_INC(comp_complete);
139 NX842_COUNTER_INC(comp_failed);
140 NX842_COUNTER_INC(decomp_complete);
141 NX842_COUNTER_INC(decomp_failed);
142 NX842_COUNTER_INC(swdecomp);
143
144 #define NX842_HIST_SLOTS 16
145
146 static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
147 {
148 int bucket = fls(time);
149
150 if (bucket)
151 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
152
153 atomic64_inc(&times[bucket]);
154 }
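/*
 * Worked example (editor's sketch, not part of the driver): fls() returns the
 * index of the highest set bit, so a duration of t microseconds lands in
 * bucket floor(log2(t)), capped at NX842_HIST_SLOTS - 1 = 15; zero stays in
 * bucket 0.  A standalone equivalent:
 */
#include <stdio.h>

static int hist_bucket(unsigned int time)
{
	int bucket = 0;

	while (time >>= 1)	/* floor(log2(time)); 0 and 1 both map to 0 */
		bucket++;
	return bucket > 15 ? 15 : bucket;
}

int main(void)
{
	/* 1us -> bucket 0, 3us -> 1, 100us -> 6, 1000000us -> capped at 15 */
	printf("%d %d %d %d\n", hist_bucket(1), hist_bucket(3),
	       hist_bucket(100), hist_bucket(1000000));
	return 0;
}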
155
156 /* NX unit operation flags */
157 #define NX842_OP_COMPRESS 0x0
158 #define NX842_OP_CRC 0x1
159 #define NX842_OP_DECOMPRESS 0x2
160 #define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC)
161 #define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
162 #define NX842_OP_ASYNC (1<<23)
163 #define NX842_OP_NOTIFY (1<<22)
164 #define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8)
165
166 static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
167 {
168 /* No use of DMA mappings within the driver. */
169 return 0;
170 }
171
172 struct nx842_slentry {
173 unsigned long ptr; /* Real address (use __pa()) */
174 unsigned long len;
175 };
176
177 /* pHyp scatterlist entry */
178 struct nx842_scatterlist {
179 int entry_nr; /* number of slentries */
180 struct nx842_slentry *entries; /* ptr to array of slentries */
181 };
182
183 /* Does not include sizeof(entry_nr) in the size */
184 static inline unsigned long nx842_get_scatterlist_size(
185 struct nx842_scatterlist *sl)
186 {
187 return sl->entry_nr * sizeof(struct nx842_slentry);
188 }
189
190 static int nx842_build_scatterlist(unsigned long buf, int len,
191 struct nx842_scatterlist *sl)
192 {
193 unsigned long nextpage;
194 struct nx842_slentry *entry;
195
196 sl->entry_nr = 0;
197
198 entry = sl->entries;
199 while (len) {
200 entry->ptr = nx842_get_pa((void *)buf);
201 nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
202 if (nextpage < buf + len) {
203 /* we aren't at the end yet */
204 if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE))
205 /* we are in the middle (or beginning) */
206 entry->len = NX842_HW_PAGE_SIZE;
207 else
208 /* we are at the beginning */
209 entry->len = nextpage - buf;
210 } else {
211 /* at the end */
212 entry->len = len;
213 }
214
215 len -= entry->len;
216 buf += entry->len;
217 sl->entry_nr++;
218 entry++;
219 }
220
221 return 0;
222 }
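/*
 * Editor's illustration (not part of the driver) of the page-boundary
 * splitting above, with nx842_get_pa() replaced by the raw address.  A
 * 10000-byte buffer starting 0x100 bytes into a page becomes a 3840-byte
 * head entry, one full 4096-byte page entry and a 2064-byte tail entry.
 */
#include <stdio.h>

#define HW_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long buf = 0x10000100UL;	/* not page aligned */
	unsigned long len = 10000;

	while (len) {
		/* ALIGN(buf + 1, HW_PAGE_SIZE): start of the next page */
		unsigned long next = (buf + HW_PAGE_SIZE) & ~(HW_PAGE_SIZE - 1);
		unsigned long chunk = (next < buf + len) ? next - buf : len;

		printf("entry: addr=0x%lx len=%lu\n", buf, chunk);
		buf += chunk;
		len -= chunk;
	}
	return 0;
}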
223
224 static int nx842_validate_result(struct device *dev,
225 struct cop_status_block *csb)
226 {
227 /* The csb must be valid after returning from vio_h_cop_sync */
228 if (!NX842_CSBCPB_VALID_CHK(csb->valid)) {
229 dev_err(dev, "%s: csbcpb not valid upon completion.\n",
230 __func__);
231 dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
232 csb->valid,
233 csb->crb_seq_number,
234 csb->completion_code,
235 csb->completion_extension);
236 dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
237 csb->processed_byte_count,
238 (unsigned long)csb->address);
239 return -EIO;
240 }
241
242 /* Check return values from the hardware in the CSB */
243 switch (csb->completion_code) {
244 case 0: /* Completed without error */
245 break;
246 case 64: /* Target bytes > Source bytes during compression */
247 case 13: /* Output buffer too small */
248 dev_dbg(dev, "%s: Not enough space in the output buffer\n",
249 __func__);
250 return -ENOSPC;
251 case 66: /* Input data contains an illegal template field */
252 case 67: /* Template indicates data past the end of the input stream */
253 dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
254 __func__, csb->completion_code);
255 return -EINVAL;
256 default:
257 dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
258 __func__, csb->completion_code);
259 return -EIO;
260 }
261
262 /* Hardware sanity check */
263 if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
264 dev_err(dev, "%s: No error returned by hardware, but "
265 "data returned is unusable, contact support.\n"
266 "(Additional info: csbcpb->processed bytes "
267 "does not specify processed bytes for the "
268 "target buffer.)\n", __func__);
269 return -EIO;
270 }
271
272 return 0;
273 }
274
275 /**
276 * nx842_pseries_compress - Compress data using the 842 algorithm
277 *
278 * Compression is provided by the NX842 coprocessor on IBM Power systems.
279 * The input buffer is compressed and the result is stored in the
280 * provided output buffer.
281 *
282 * Upon return from this function @outlen contains the length of the
283 * compressed data. If there is an error then @outlen is left unchanged
284 * and the error is indicated by the return code.
285 *
286 * @in: Pointer to input buffer
287 * @inlen: Length of input buffer
288 * @out: Pointer to output buffer
289 * @outlen: Length of output buffer
290 * @wmem: ptr to buffer for working memory, size determined by
291 * NX842_MEM_COMPRESS
292 *
293 * Returns:
294 * 0 Success, output of length @outlen stored in the buffer at @out
295 * -EINVAL Buffer constraints are not satisfied
296 * -ENOSPC Output buffer is too small
297 * -EIO Internal error
298 * -ENODEV Hardware unavailable
299 */
300 static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
301 unsigned char *out, unsigned int *outlen,
302 void *wmem)
303 {
304 struct nx842_devdata *local_devdata;
305 struct device *dev = NULL;
306 struct nx842_workmem *workmem;
307 struct nx842_scatterlist slin, slout;
308 struct nx_csbcpb *csbcpb;
309 int ret = 0, max_sync_size;
310 unsigned long inbuf, outbuf;
311 struct vio_pfo_op op = {
312 .done = NULL,
313 .handle = 0,
314 .timeout = 0,
315 };
316 unsigned long start = get_tb();
317
318 inbuf = (unsigned long)in;
319 if (check_constraints(inbuf, &inlen, true))
320 return -EINVAL;
321
322 outbuf = (unsigned long)out;
323 if (check_constraints(outbuf, outlen, false))
324 return -EINVAL;
325
326 rcu_read_lock();
327 local_devdata = rcu_dereference(devdata);
328 if (!local_devdata || !local_devdata->dev) {
329 rcu_read_unlock();
330 return -ENODEV;
331 }
332 max_sync_size = local_devdata->max_sync_size;
333 dev = local_devdata->dev;
334
335 /* Init scatterlist */
336 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
337 slin.entries = (struct nx842_slentry *)workmem->slin;
338 slout.entries = (struct nx842_slentry *)workmem->slout;
339
340 /* Init operation */
341 op.flags = NX842_OP_COMPRESS;
342 csbcpb = &workmem->csbcpb;
343 memset(csbcpb, 0, sizeof(*csbcpb));
344 op.csbcpb = nx842_get_pa(csbcpb);
345 op.out = nx842_get_pa(slout.entries);
346
347 if ((inbuf & NX842_HW_PAGE_MASK) ==
348 ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
349 /* Create direct DDE */
350 op.in = nx842_get_pa((void *)inbuf);
351 op.inlen = inlen;
352 } else {
353 /* Create indirect DDE (scatterlist) */
354 nx842_build_scatterlist(inbuf, inlen, &slin);
355 op.in = nx842_get_pa(slin.entries);
356 op.inlen = -nx842_get_scatterlist_size(&slin);
357 }
358
359 if ((outbuf & NX842_HW_PAGE_MASK) ==
360 ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
361 /* Create direct DDE */
362 op.out = nx842_get_pa((void *)outbuf);
363 op.outlen = *outlen;
364 } else {
365 /* Create indirect DDE (scatterlist) */
366 nx842_build_scatterlist(outbuf, *outlen, &slout);
367 op.out = nx842_get_pa(slout.entries);
368 op.outlen = -nx842_get_scatterlist_size(&slout);
369 }
370
371 /* Send request to pHyp */
372 ret = vio_h_cop_sync(local_devdata->vdev, &op);
373
374 /* Check for pHyp error */
375 if (ret) {
376 dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
377 __func__, ret, op.hcall_err);
378 ret = -EIO;
379 goto unlock;
380 }
381
382 /* Check for hardware error */
383 ret = nx842_validate_result(dev, &csbcpb->csb);
384 if (ret)
385 goto unlock;
386
387 *outlen = csbcpb->csb.processed_byte_count;
388 dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);
389
390 unlock:
391 if (ret)
392 nx842_inc_comp_failed(local_devdata);
393 else {
394 nx842_inc_comp_complete(local_devdata);
395 ibm_nx842_incr_hist(local_devdata->counters->comp_times,
396 (get_tb() - start) / tb_ticks_per_usec);
397 }
398 rcu_read_unlock();
399 return ret;
400 }
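/*
 * Editor's sketch (not part of the driver): the direct-vs-indirect DDE choice
 * above depends only on whether the first and last byte of a buffer fall in
 * the same 4K hardware page.
 */
#include <stdio.h>

#define HW_PAGE_MASK (~(4096UL - 1))

static int fits_one_page(unsigned long buf, unsigned long len)
{
	return (buf & HW_PAGE_MASK) == ((buf + len - 1) & HW_PAGE_MASK);
}

int main(void)
{
	printf("%d\n", fits_one_page(0x20000000UL, 4096));	/* 1: direct DDE */
	printf("%d\n", fits_one_page(0x20000800UL, 4096));	/* 0: scatterlist */
	return 0;
}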
401
402 /**
403 * nx842_pseries_decompress - Decompress data using the 842 algorithm
404 *
405 * Decompression is provided by the NX842 coprocessor on IBM Power systems.
406 * The input buffer is decompressed and the result is stored in the
407 * provided output buffer. The size allocated to the output buffer is
408 * provided by the caller of this function in @outlen. Upon return from
409 * this function @outlen contains the length of the decompressed data.
410 * If there is an error then @outlen is left unchanged and the error
411 * is indicated by the return code.
412 *
413 * @in: Pointer to input buffer
414 * @inlen: Length of input buffer
415 * @out: Pointer to output buffer
416 * @outlen: Length of output buffer
417 * @wmem: ptr to buffer for working memory, size determined by
418 * NX842_MEM_COMPRESS
419 *
420 * Returns:
421 * 0 Success, output of length @outlen stored in the buffer at @out
422 * -ENODEV Hardware decompression device is unavailable
423 * -ENOMEM Unable to allocate internal buffers
424 * -ENOSPC Output buffer is too small
425 * -EINVAL Bad input data encountered when attempting decompress
426 * -EIO Internal error
427 */
428 static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
429 unsigned char *out, unsigned int *outlen,
430 void *wmem)
431 {
432 struct nx842_devdata *local_devdata;
433 struct device *dev = NULL;
434 struct nx842_workmem *workmem;
435 struct nx842_scatterlist slin, slout;
436 struct nx_csbcpb *csbcpb;
437 int ret = 0, max_sync_size;
438 unsigned long inbuf, outbuf;
439 struct vio_pfo_op op = {
440 .done = NULL,
441 .handle = 0,
442 .timeout = 0,
443 };
444 unsigned long start = get_tb();
445
446 /* Ensure page alignment and size */
447 inbuf = (unsigned long)in;
448 if (check_constraints(inbuf, &inlen, true))
449 return -EINVAL;
450
451 outbuf = (unsigned long)out;
452 if (check_constraints(outbuf, outlen, false))
453 return -EINVAL;
454
455 rcu_read_lock();
456 local_devdata = rcu_dereference(devdata);
457 if (!local_devdata || !local_devdata->dev) {
458 rcu_read_unlock();
459 return -ENODEV;
460 }
461 max_sync_size = local_devdata->max_sync_size;
462 dev = local_devdata->dev;
463
464 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
465
466 /* Init scatterlist */
467 slin.entries = (struct nx842_slentry *)workmem->slin;
468 slout.entries = (struct nx842_slentry *)workmem->slout;
469
470 /* Init operation */
471 op.flags = NX842_OP_DECOMPRESS;
472 csbcpb = &workmem->csbcpb;
473 memset(csbcpb, 0, sizeof(*csbcpb));
474 op.csbcpb = nx842_get_pa(csbcpb);
475
476 if ((inbuf & NX842_HW_PAGE_MASK) ==
477 ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
478 /* Create direct DDE */
479 op.in = nx842_get_pa((void *)inbuf);
480 op.inlen = inlen;
481 } else {
482 /* Create indirect DDE (scatterlist) */
483 nx842_build_scatterlist(inbuf, inlen, &slin);
484 op.in = nx842_get_pa(slin.entries);
485 op.inlen = -nx842_get_scatterlist_size(&slin);
486 }
487
488 if ((outbuf & NX842_HW_PAGE_MASK) ==
489 ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
490 /* Create direct DDE */
491 op.out = nx842_get_pa((void *)outbuf);
492 op.outlen = *outlen;
493 } else {
494 /* Create indirect DDE (scatterlist) */
495 nx842_build_scatterlist(outbuf, *outlen, &slout);
496 op.out = nx842_get_pa(slout.entries);
497 op.outlen = -nx842_get_scatterlist_size(&slout);
498 }
499
500 /* Send request to pHyp */
501 ret = vio_h_cop_sync(local_devdata->vdev, &op);
502
503 /* Check for pHyp error */
504 if (ret) {
505 dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
506 __func__, ret, op.hcall_err);
507 goto unlock;
508 }
509
510 /* Check for hardware error */
511 ret = nx842_validate_result(dev, &csbcpb->csb);
512 if (ret)
513 goto unlock;
514
515 *outlen = csbcpb->csb.processed_byte_count;
516
517 unlock:
518 if (ret)
519 /* decompress fail */
520 nx842_inc_decomp_failed(local_devdata);
521 else {
522 nx842_inc_decomp_complete(local_devdata);
523 ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
524 (get_tb() - start) / tb_ticks_per_usec);
525 }
526
527 rcu_read_unlock();
528 return ret;
529 }
530
531 /**
532 * nx842_OF_set_defaults -- Set default (disabled) values for devdata
533 *
534 * @devdata - struct nx842_devdata to update
535 *
536 * Returns:
537 * 0 on success
538 * -ENOENT if @devdata ptr is NULL
539 */
540 static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
541 {
542 if (devdata) {
543 devdata->max_sync_size = 0;
544 devdata->max_sync_sg = 0;
545 devdata->max_sg_len = 0;
546 devdata->status = UNAVAILABLE;
547 return 0;
548 } else
549 return -ENOENT;
550 }
551
552 /**
553 * nx842_OF_upd_status -- Update the device info from OF status prop
554 *
555 * The status property indicates whether the accelerator is usable. The
556 * presence of the device node in the OF tree indicates that the hardware
557 * is present; the device is enabled only when the status value is
558 * 'okay', otherwise the driver treats it as unavailable.
559 *
560 * @devdata - struct nx842_devdata to update
561 * @prop - struct property pointer containing the status value
562 *
563 * Returns:
564 * 0 - Always returned; devdata->status is set to AVAILABLE when the
565 * property value is 'okay' and to UNAVAILABLE otherwise
566 */
567 static int nx842_OF_upd_status(struct nx842_devdata *devdata,
568 struct property *prop) {
569 int ret = 0;
570 const char *status = (const char *)prop->value;
571
572 if (!strncmp(status, "okay", (size_t)prop->length)) {
573 devdata->status = AVAILABLE;
574 } else {
575 dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
576 __func__, status);
577 devdata->status = UNAVAILABLE;
578 }
579
580 return ret;
581 }
582
583 /**
584 * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
585 *
586 * Definition of the 'ibm,max-sg-len' OF property:
587 * This field indicates the maximum byte length of a scatter list
588 * for the platform facility. It is a single cell encoded as with encode-int.
589 *
590 * Example:
591 * # od -x ibm,max-sg-len
592 * 0000000 0000 0ff0
593 *
594 * In this example, the maximum byte length of a scatter list is
595 * 0x0ff0 (4,080).
596 *
597 * @devdata - struct nx842_devdata to update
598 * @prop - struct property pointer containing the max-sg-len value
599 *
600 * Returns:
601 * 0 on success
602 * -EINVAL on failure
603 */
604 static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
605 struct property *prop) {
606 int ret = 0;
607 const int *maxsglen = prop->value;
608
609 if (prop->length != sizeof(*maxsglen)) {
610 dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
611 dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
612 prop->length, sizeof(*maxsglen));
613 ret = -EINVAL;
614 } else {
615 devdata->max_sg_len = (unsigned int)min(*maxsglen,
616 (int)NX842_HW_PAGE_SIZE);
617 }
618
619 return ret;
620 }
621
622 /**
623 * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
624 *
625 * Definition of the 'ibm,max-sync-cop' OF property:
626 * Two series of cells. The first series of cells represents the maximums
627 * that can be synchronously compressed. The second series of cells
628 * represents the maximums that can be synchronously decompressed.
629 * 1. The first cell in each series contains the count of the
630 * data length / scatter list element count pairs that follow – each being
631 * of the form
632 * a. One cell data byte length
633 * b. One cell total number of scatter list elements
634 *
635 * Example:
636 * # od -x ibm,max-sync-cop
637 * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001
638 * 0000020 0000 1000 0000 01fe
639 *
640 * In this example, compression supports 0x1000 (4,096) data byte length
641 * and 0x1fe (510) total scatter list elements. Decompression supports
642 * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
643 * elements.
644 *
645 * @devdata - struct nx842_devdata to update
646 * @prop - struct property pointer containing the maxsyncop values
647 *
648 * Returns:
649 * 0 on success
650 * -EINVAL on failure
651 */
652 static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
653 struct property *prop) {
654 int ret = 0;
655 const struct maxsynccop_t {
656 int comp_elements;
657 int comp_data_limit;
658 int comp_sg_limit;
659 int decomp_elements;
660 int decomp_data_limit;
661 int decomp_sg_limit;
662 } *maxsynccop;
663
664 if (prop->length != sizeof(*maxsynccop)) {
665 dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
666 dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
667 sizeof(*maxsynccop));
668 ret = -EINVAL;
669 goto out;
670 }
671
672 maxsynccop = (const struct maxsynccop_t *)prop->value;
673
674 /* Use one limit rather than separate limits for compression and
675 * decompression. Cap it at 64K so it cannot exceed the size that the
676 * header can support, and reject anything smaller than the 4K
677 * hardware page size. */
678 devdata->max_sync_size =
679 (unsigned int)min(maxsynccop->comp_data_limit,
680 maxsynccop->decomp_data_limit);
681
682 devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
683 65536);
684
685 if (devdata->max_sync_size < 4096) {
686 dev_err(devdata->dev, "%s: hardware max data size (%u) is "
687 "less than the driver minimum, unable to use "
688 "the hardware device\n",
689 __func__, devdata->max_sync_size);
690 ret = -EINVAL;
691 goto out;
692 }
693
694 nx842_pseries_constraints.maximum = devdata->max_sync_size;
695
696 devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit,
697 maxsynccop->decomp_sg_limit);
698 if (devdata->max_sync_sg < 1) {
699 dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
700 "less than the driver minimum, unable to use "
701 "the hardware device\n",
702 __func__, devdata->max_sync_sg);
703 ret = -EINVAL;
704 goto out;
705 }
706
707 out:
708 return ret;
709 }
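/*
 * Editor's illustration (not part of the driver): decoding the raw
 * 'ibm,max-sync-cop' bytes from the "od -x" example in the comment above.
 * Device tree cells are big-endian 32-bit values, which a plain int read
 * matches on the big-endian Power platform; the byte order is decoded
 * explicitly here so the sketch is host independent.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static const unsigned char raw[24] = {
		0x00, 0x00, 0x00, 0x01,  0x00, 0x00, 0x10, 0x00,
		0x00, 0x00, 0x01, 0xfe,  0x00, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x10, 0x00,  0x00, 0x00, 0x01, 0xfe,
	};
	uint32_t cell[6];
	uint32_t max_sync_size, max_sync_sg;
	int i;

	for (i = 0; i < 6; i++)
		cell[i] = ((uint32_t)raw[4 * i] << 24) |
			  ((uint32_t)raw[4 * i + 1] << 16) |
			  ((uint32_t)raw[4 * i + 2] << 8) |
			   (uint32_t)raw[4 * i + 3];

	/* cell[1]/cell[2]: compress limits, cell[4]/cell[5]: decompress limits */
	max_sync_size = cell[1] < cell[4] ? cell[1] : cell[4];
	if (max_sync_size > 65536)
		max_sync_size = 65536;
	max_sync_sg = cell[2] < cell[5] ? cell[2] : cell[5];

	printf("max_sync_size=%u max_sync_sg=%u\n", max_sync_size, max_sync_sg);
	/* prints: max_sync_size=4096 max_sync_sg=510 */
	return 0;
}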
710
711 /**
712 *
713 * nx842_OF_upd -- Handle OF property updates for the device.
714 *
715 * Set all properties from the OF tree. Optionally, a new property
716 * can be provided by the @new_prop pointer to overwrite an existing value.
717 * The device will remain disabled until all values are valid; this function
718 * returns an error for updates if any value is invalid.
719 *
720 * @new_prop: If not NULL, this property is being updated. If NULL, update
721 * all properties from the current values in the OF tree.
722 *
723 * Returns:
724 * 0 - Success
725 * -ENOMEM - Could not allocate memory for new devdata structure
726 * -EINVAL - property value not found, new_prop is not a recognized
727 * property for the device or property value is not valid.
728 * -ENODEV - Device is not available
729 */
730 static int nx842_OF_upd(struct property *new_prop)
731 {
732 struct nx842_devdata *old_devdata = NULL;
733 struct nx842_devdata *new_devdata = NULL;
734 struct device_node *of_node = NULL;
735 struct property *status = NULL;
736 struct property *maxsglen = NULL;
737 struct property *maxsyncop = NULL;
738 int ret = 0;
739 unsigned long flags;
740
741 spin_lock_irqsave(&devdata_mutex, flags);
742 old_devdata = rcu_dereference_check(devdata,
743 lockdep_is_held(&devdata_mutex));
744 if (old_devdata)
745 of_node = old_devdata->dev->of_node;
746
747 if (!old_devdata || !of_node) {
748 pr_err("%s: device is not available\n", __func__);
749 spin_unlock_irqrestore(&devdata_mutex, flags);
750 return -ENODEV;
751 }
752
753 new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
754 if (!new_devdata) {
755 dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
756 ret = -ENOMEM;
757 goto error_out;
758 }
759
760 memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
761 new_devdata->counters = old_devdata->counters;
762
763 /* Set ptrs for existing properties */
764 status = of_find_property(of_node, "status", NULL);
765 maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
766 maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
767 if (!status || !maxsglen || !maxsyncop) {
768 dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
769 ret = -EINVAL;
770 goto error_out;
771 }
772
773 /*
774 * If this is a property update, there are only certain properties that
775 * we care about. Bail if it isn't in the below list
776 */
777 if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) &&
778 strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) &&
779 strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
780 goto out;
781
782 /* Perform property updates */
783 ret = nx842_OF_upd_status(new_devdata, status);
784 if (ret)
785 goto error_out;
786
787 ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
788 if (ret)
789 goto error_out;
790
791 ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
792 if (ret)
793 goto error_out;
794
795 out:
796 dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
797 __func__, new_devdata->max_sync_size,
798 old_devdata->max_sync_size);
799 dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
800 __func__, new_devdata->max_sync_sg,
801 old_devdata->max_sync_sg);
802 dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
803 __func__, new_devdata->max_sg_len,
804 old_devdata->max_sg_len);
805
806 rcu_assign_pointer(devdata, new_devdata);
807 spin_unlock_irqrestore(&devdata_mutex, flags);
808 synchronize_rcu();
809 dev_set_drvdata(new_devdata->dev, new_devdata);
810 kfree(old_devdata);
811 return 0;
812
813 error_out:
814 if (new_devdata) {
815 dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
816 nx842_OF_set_defaults(new_devdata);
817 rcu_assign_pointer(devdata, new_devdata);
818 spin_unlock_irqrestore(&devdata_mutex, flags);
819 synchronize_rcu();
820 dev_set_drvdata(new_devdata->dev, new_devdata);
821 kfree(old_devdata);
822 } else {
823 dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
824 spin_unlock_irqrestore(&devdata_mutex, flags);
825 }
826
827 if (!ret)
828 ret = -EINVAL;
829 return ret;
830 }
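/*
 * Editor's note: the function above is the usual RCU publish pattern for
 * devdata, condensed here for reference (kernel context assumed, error
 * handling omitted):
 *
 *	new = kzalloc(sizeof(*new), GFP_NOFS);        build a private copy
 *	memcpy(new, old, sizeof(*old));
 *	new->max_sync_size = ...;                     modify only the copy
 *	rcu_assign_pointer(devdata, new);             publish; readers now see new
 *	spin_unlock_irqrestore(&devdata_mutex, flags);
 *	synchronize_rcu();                            wait out readers still on old
 *	kfree(old);                                   now safe to free
 */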
831
832 /**
833 * nx842_OF_notifier - Process updates to OF properties for the device
834 *
835 * @np: notifier block
836 * @action: notifier action
837 * @data: struct of_reconfig_data pointer if action is
838 * OF_RECONFIG_UPDATE_PROPERTY
839 *
840 * Returns:
841 * NOTIFY_OK in all cases; failures while applying a property update
842 * are reported by nx842_OF_upd() and are not propagated through the
843 * notifier return value
844 */
845 static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
846 void *data)
847 {
848 struct of_reconfig_data *upd = data;
849 struct nx842_devdata *local_devdata;
850 struct device_node *node = NULL;
851
852 rcu_read_lock();
853 local_devdata = rcu_dereference(devdata);
854 if (local_devdata)
855 node = local_devdata->dev->of_node;
856
857 if (local_devdata &&
858 action == OF_RECONFIG_UPDATE_PROPERTY &&
859 !strcmp(upd->dn->name, node->name)) {
860 rcu_read_unlock();
861 nx842_OF_upd(upd->prop);
862 } else
863 rcu_read_unlock();
864
865 return NOTIFY_OK;
866 }
867
868 static struct notifier_block nx842_of_nb = {
869 .notifier_call = nx842_OF_notifier,
870 };
871
872 #define nx842_counter_read(_name) \
873 static ssize_t nx842_##_name##_show(struct device *dev, \
874 struct device_attribute *attr, \
875 char *buf) { \
876 struct nx842_devdata *local_devdata; \
877 int p = 0; \
878 rcu_read_lock(); \
879 local_devdata = rcu_dereference(devdata); \
880 if (local_devdata) \
881 p = snprintf(buf, PAGE_SIZE, "%ld\n", \
882 atomic64_read(&local_devdata->counters->_name)); \
883 rcu_read_unlock(); \
884 return p; \
885 }
886
887 #define NX842DEV_COUNTER_ATTR_RO(_name) \
888 nx842_counter_read(_name); \
889 static struct device_attribute dev_attr_##_name = __ATTR(_name, \
890 0444, \
891 nx842_##_name##_show,\
892 NULL);
893
894 NX842DEV_COUNTER_ATTR_RO(comp_complete);
895 NX842DEV_COUNTER_ATTR_RO(comp_failed);
896 NX842DEV_COUNTER_ATTR_RO(decomp_complete);
897 NX842DEV_COUNTER_ATTR_RO(decomp_failed);
898 NX842DEV_COUNTER_ATTR_RO(swdecomp);
899
900 static ssize_t nx842_timehist_show(struct device *,
901 struct device_attribute *, char *);
902
903 static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
904 nx842_timehist_show, NULL);
905 static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
906 0444, nx842_timehist_show, NULL);
907
908 static ssize_t nx842_timehist_show(struct device *dev,
909 struct device_attribute *attr, char *buf) {
910 char *p = buf;
911 struct nx842_devdata *local_devdata;
912 atomic64_t *times;
913 int bytes_remain = PAGE_SIZE;
914 int bytes;
915 int i;
916
917 rcu_read_lock();
918 local_devdata = rcu_dereference(devdata);
919 if (!local_devdata) {
920 rcu_read_unlock();
921 return 0;
922 }
923
924 if (attr == &dev_attr_comp_times)
925 times = local_devdata->counters->comp_times;
926 else if (attr == &dev_attr_decomp_times)
927 times = local_devdata->counters->decomp_times;
928 else {
929 rcu_read_unlock();
930 return 0;
931 }
932
933 for (i = 0; i < (NX842_HIST_SLOTS - 1); i++) {
934 bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
935 i ? (2<<(i-1)) : 0, (2<<i)-1,
936 atomic64_read(&times[i]));
937 bytes_remain -= bytes;
938 p += bytes;
939 }
940 /* The last bucket holds everything at or above
941 * 2<<(NX842_HIST_SLOTS - 2) us */
942 bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
943 2<<(NX842_HIST_SLOTS - 2),
944 atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
945 p += bytes;
946
947 rcu_read_unlock();
948 return p - buf;
949 }
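/*
 * Editor's illustration (not part of the driver): the bucket labels emitted
 * by the sysfs show function above, generated standalone.  Bucket 0 is
 * "0-1us", bucket i covers 2^i .. 2^(i+1)-1 microseconds, and the final
 * bucket is open ended at 32768us.
 */
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 15; i++)
		printf("%u-%uus\n", i ? (2 << (i - 1)) : 0, (2 << i) - 1);
	printf("%uus -\n", 2 << 14);	/* everything at or above 32768us */
	return 0;
}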
950
951 static struct attribute *nx842_sysfs_entries[] = {
952 &dev_attr_comp_complete.attr,
953 &dev_attr_comp_failed.attr,
954 &dev_attr_decomp_complete.attr,
955 &dev_attr_decomp_failed.attr,
956 &dev_attr_swdecomp.attr,
957 &dev_attr_comp_times.attr,
958 &dev_attr_decomp_times.attr,
959 NULL,
960 };
961
962 static struct attribute_group nx842_attribute_group = {
963 .name = NULL, /* put in device directory */
964 .attrs = nx842_sysfs_entries,
965 };
966
967 static struct nx842_driver nx842_pseries_driver = {
968 .owner = THIS_MODULE,
969 .constraints = &nx842_pseries_constraints,
970 .compress = nx842_pseries_compress,
971 .decompress = nx842_pseries_decompress,
972 };
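/*
 * Editor's sketch (kernel context assumed, buffer names illustrative) of how
 * the generic nx-842 layer is expected to call through these ops for an input
 * that already meets nx842_pseries_constraints (the generic layer handles
 * splitting and padding).  The workmem buffer must be at least
 * NX842_MEM_COMPRESS bytes, as the BUILD_BUG_ON() in nx842_init() below
 * enforces.
 *
 *	const struct nx842_driver *drv = &nx842_pseries_driver;
 *	unsigned int clen = 4096;
 *	void *wmem = kmalloc(NX842_MEM_COMPRESS, GFP_KERNEL);
 *	int ret;
 *
 *	ret = drv->compress(src, 4096, dst, &clen, wmem);
 *	   on success, the first clen bytes of dst hold the 842 stream
 */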
973
974 static int __init nx842_probe(struct vio_dev *viodev,
975 const struct vio_device_id *id)
976 {
977 struct nx842_devdata *old_devdata, *new_devdata = NULL;
978 unsigned long flags;
979 int ret = 0;
980
981 spin_lock_irqsave(&devdata_mutex, flags);
982 old_devdata = rcu_dereference_check(devdata,
983 lockdep_is_held(&devdata_mutex));
984
985 if (old_devdata && old_devdata->vdev != NULL) {
986 dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
987 ret = -1;
988 goto error_unlock;
989 }
990
991 dev_set_drvdata(&viodev->dev, NULL);
992
993 new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
994 if (!new_devdata) {
995 dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
996 ret = -ENOMEM;
997 goto error_unlock;
998 }
999
1000 new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
1001 GFP_NOFS);
1002 if (!new_devdata->counters) {
1003 dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
1004 ret = -ENOMEM;
1005 goto error_unlock;
1006 }
1007
1008 new_devdata->vdev = viodev;
1009 new_devdata->dev = &viodev->dev;
1010 nx842_OF_set_defaults(new_devdata);
1011
1012 rcu_assign_pointer(devdata, new_devdata);
1013 spin_unlock_irqrestore(&devdata_mutex, flags);
1014 synchronize_rcu();
1015 kfree(old_devdata);
1016
1017 of_reconfig_notifier_register(&nx842_of_nb);
1018
1019 ret = nx842_OF_upd(NULL);
1020 if (ret && ret != -ENODEV) {
1021 dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
1022 ret = -1;
1023 goto error;
1024 }
1025
1026 rcu_read_lock();
1027 dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
1028 rcu_read_unlock();
1029
1030 if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
1031 dev_err(&viodev->dev, "could not create sysfs device attributes\n");
1032 ret = -1;
1033 goto error;
1034 }
1035
1036 nx842_register_driver(&nx842_pseries_driver);
1037
1038 return 0;
1039
1040 error_unlock:
1041 spin_unlock_irqrestore(&devdata_mutex, flags);
1042 if (new_devdata)
1043 kfree(new_devdata->counters);
1044 kfree(new_devdata);
1045 error:
1046 return ret;
1047 }
1048
1049 static int __exit nx842_remove(struct vio_dev *viodev)
1050 {
1051 struct nx842_devdata *old_devdata;
1052 unsigned long flags;
1053
1054 pr_info("Removing IBM Power 842 compression device\n");
1055 sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
1056
1057 spin_lock_irqsave(&devdata_mutex, flags);
1058 old_devdata = rcu_dereference_check(devdata,
1059 lockdep_is_held(&devdata_mutex));
1060 of_reconfig_notifier_unregister(&nx842_of_nb);
1061 RCU_INIT_POINTER(devdata, NULL);
1062 spin_unlock_irqrestore(&devdata_mutex, flags);
1063 synchronize_rcu();
1064 dev_set_drvdata(&viodev->dev, NULL);
1065 if (old_devdata)
1066 kfree(old_devdata->counters);
1067 kfree(old_devdata);
1068
1069 nx842_unregister_driver(&nx842_pseries_driver);
1070
1071 return 0;
1072 }
1073
1074 static struct vio_device_id nx842_vio_driver_ids[] = {
1075 {NX842_PSERIES_COMPAT_NAME "-v1", NX842_PSERIES_COMPAT_NAME},
1076 {"", ""},
1077 };
1078
1079 static struct vio_driver nx842_vio_driver = {
1080 .name = MODULE_NAME,
1081 .probe = nx842_probe,
1082 .remove = __exit_p(nx842_remove),
1083 .get_desired_dma = nx842_get_desired_dma,
1084 .id_table = nx842_vio_driver_ids,
1085 };
1086
1087 static int __init nx842_init(void)
1088 {
1089 struct nx842_devdata *new_devdata;
1090 pr_info("Registering IBM Power 842 compression driver\n");
1091
1092 BUILD_BUG_ON(sizeof(struct nx842_workmem) > NX842_MEM_COMPRESS);
1093
1094 RCU_INIT_POINTER(devdata, NULL);
1095 new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
1096 if (!new_devdata) {
1097 pr_err("Could not allocate memory for device data\n");
1098 return -ENOMEM;
1099 }
1100 new_devdata->status = UNAVAILABLE;
1101 RCU_INIT_POINTER(devdata, new_devdata);
1102
1103 return vio_register_driver(&nx842_vio_driver);
1104 }
1105
1106 module_init(nx842_init);
1107
1108 static void __exit nx842_exit(void)
1109 {
1110 struct nx842_devdata *old_devdata;
1111 unsigned long flags;
1112
1113 pr_info("Exiting IBM Power 842 compression driver\n");
1114 spin_lock_irqsave(&devdata_mutex, flags);
1115 old_devdata = rcu_dereference_check(devdata,
1116 lockdep_is_held(&devdata_mutex));
1117 RCU_INIT_POINTER(devdata, NULL);
1118 spin_unlock_irqrestore(&devdata_mutex, flags);
1119 synchronize_rcu();
1120 if (old_devdata && old_devdata->dev)
1121 dev_set_drvdata(old_devdata->dev, NULL);
1122 kfree(old_devdata);
1123 nx842_unregister_driver(&nx842_pseries_driver);
1124 vio_unregister_driver(&nx842_vio_driver);
1125 }
1126
1127 module_exit(nx842_exit);
1128