staging:iio:events - new 64 bit code structure and push out drivers.
[deliverable/linux.git] / drivers / staging / iio / industrialio-ring.c
CommitLineData
7026ea4b
JC
1/* The industrial I/O core
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * Handling of ring allocation / resizing.
10 *
11 *
12 * Things to look at here.
13 * - Better memory allocation techniques?
14 * - Alternative access techniques?
15 */
16#include <linux/kernel.h>
17#include <linux/device.h>
7026ea4b 18#include <linux/fs.h>
7026ea4b 19#include <linux/cdev.h>
5a0e3ad6 20#include <linux/slab.h>
a7348347 21#include <linux/poll.h>
7026ea4b
JC
22
23#include "iio.h"
df9c1c42 24#include "iio_core.h"
9dd1cb30 25#include "sysfs.h"
7026ea4b
JC
26#include "ring_generic.h"
27
/* Prefix strings used in the sysfs scan element "type" description. */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
7026ea4b
JC
32
/**
 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 *
 * Returns -EINVAL when the underlying buffer implementation provides no
 * read_first_n callback; otherwise forwards to it.
 **/
ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
				    size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_ring_buffer *rb = indio_dev->ring;

	if (!rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
49
a7348347
JC
/**
 * iio_ring_poll() - poll the ring to find out if it has data
 *
 * Registers the caller on the ring's wait queue and reports
 * POLLIN | POLLRDNORM when the buffer flags that data is available
 * via its stufftoread flag.
 */
unsigned int iio_ring_poll(struct file *filp,
			   struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_ring_buffer *rb = indio_dev->ring;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
65
1aa04278 66void iio_chrdev_ring_open(struct iio_dev *indio_dev)
7026ea4b 67{
1aa04278
JC
68 struct iio_ring_buffer *rb = indio_dev->ring;
69 if (rb && rb->access->mark_in_use)
70 rb->access->mark_in_use(rb);
7026ea4b 71}
7026ea4b 72
1aa04278 73void iio_chrdev_ring_release(struct iio_dev *indio_dev)
7026ea4b 74{
1aa04278 75 struct iio_ring_buffer *rb = indio_dev->ring;
758d988c 76
1aa04278
JC
77 clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
78 if (rb->access->unmark_in_use)
79 rb->access->unmark_in_use(rb);
7026ea4b 80
7026ea4b
JC
81}
82
83void iio_ring_buffer_init(struct iio_ring_buffer *ring,
84 struct iio_dev *dev_info)
85{
7026ea4b 86 ring->indio_dev = dev_info;
a7348347 87 init_waitqueue_head(&ring->pollq);
7026ea4b
JC
88}
89EXPORT_SYMBOL(iio_ring_buffer_init);
90
1d892719 91static ssize_t iio_show_scan_index(struct device *dev,
8d213f24
JC
92 struct device_attribute *attr,
93 char *buf)
1d892719 94{
8d213f24 95 return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
1d892719
JC
96}
97
98static ssize_t iio_show_fixed_type(struct device *dev,
99 struct device_attribute *attr,
100 char *buf)
101{
102 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
8310b86c
JC
103 u8 type = this_attr->c->scan_type.endianness;
104
105 if (type == IIO_CPU) {
106 if (__LITTLE_ENDIAN)
107 type = IIO_LE;
108 else
109 type = IIO_BE;
110 }
111 return sprintf(buf, "%s:%c%d/%d>>%u\n",
112 iio_endian_prefix[type],
1d892719
JC
113 this_attr->c->scan_type.sign,
114 this_attr->c->scan_type.realbits,
115 this_attr->c->scan_type.storagebits,
116 this_attr->c->scan_type.shift);
117}
118
8d213f24
JC
119static ssize_t iio_scan_el_show(struct device *dev,
120 struct device_attribute *attr,
121 char *buf)
122{
123 int ret;
1aa04278 124 struct iio_dev *dev_info = dev_get_drvdata(dev);
8d213f24 125
1aa04278
JC
126 ret = iio_scan_mask_query(dev_info->ring,
127 to_iio_dev_attr(attr)->address);
8d213f24
JC
128 if (ret < 0)
129 return ret;
130 return sprintf(buf, "%d\n", ret);
131}
132
133static int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
134{
32b5eeca 135 clear_bit(bit, ring->scan_mask);
8d213f24
JC
136 ring->scan_count--;
137 return 0;
138}
139
/*
 * sysfs write: enable or disable a channel's presence in the scan.
 * Any leading '0' disables, anything else enables. Changing the scan
 * composition is refused (-EBUSY) while triggered capture is running.
 */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_ring_buffer *ring = indio_dev->ring;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	state = !(buf[0] == '0');
	/* mlock serialises mode changes against mask updates. */
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	/* Query current state so we only toggle when it actually changes. */
	ret = iio_scan_mask_query(ring, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;

}
176
177static ssize_t iio_scan_el_ts_show(struct device *dev,
178 struct device_attribute *attr,
179 char *buf)
180{
1aa04278
JC
181 struct iio_dev *dev_info = dev_get_drvdata(dev);
182 return sprintf(buf, "%d\n", dev_info->ring->scan_timestamp);
8d213f24
JC
183}
184
/*
 * sysfs write: enable or disable timestamp capture. Refused (-EBUSY)
 * while triggered capture is running, mirroring iio_scan_el_store().
 */
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret = 0;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool state;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
206
1aa04278 207static int iio_ring_add_channel_sysfs(struct iio_dev *indio_dev,
1d892719
JC
208 const struct iio_chan_spec *chan)
209{
210 int ret;
1aa04278 211 struct iio_ring_buffer *ring = indio_dev->ring;
1d892719
JC
212
213 ret = __iio_add_chan_devattr("index", "scan_elements",
214 chan,
215 &iio_show_scan_index,
216 NULL,
217 0,
218 0,
1aa04278 219 &indio_dev->dev,
1d892719
JC
220 &ring->scan_el_dev_attr_list);
221 if (ret)
222 goto error_ret;
223
224 ret = __iio_add_chan_devattr("type", "scan_elements",
225 chan,
226 &iio_show_fixed_type,
227 NULL,
228 0,
229 0,
1aa04278 230 &indio_dev->dev,
1d892719 231 &ring->scan_el_dev_attr_list);
1d892719
JC
232 if (ret)
233 goto error_ret;
234
a88b3ebc
JC
235 if (chan->type != IIO_TIMESTAMP)
236 ret = __iio_add_chan_devattr("en", "scan_elements",
237 chan,
238 &iio_scan_el_show,
239 &iio_scan_el_store,
240 chan->scan_index,
241 0,
1aa04278 242 &indio_dev->dev,
a88b3ebc
JC
243 &ring->scan_el_dev_attr_list);
244 else
245 ret = __iio_add_chan_devattr("en", "scan_elements",
246 chan,
247 &iio_scan_el_ts_show,
248 &iio_scan_el_ts_store,
249 chan->scan_index,
250 0,
1aa04278 251 &indio_dev->dev,
a88b3ebc 252 &ring->scan_el_dev_attr_list);
1d892719
JC
253error_ret:
254 return ret;
255}
256
1aa04278 257static void iio_ring_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
1d892719
JC
258 struct iio_dev_attr *p)
259{
1aa04278 260 sysfs_remove_file_from_group(&indio_dev->dev.kobj,
1d892719
JC
261 &p->dev_attr.attr, "scan_elements");
262 kfree(p->dev_attr.attr.name);
263 kfree(p);
264}
265
/*
 * Placeholder group used to create an empty scan_elements directory
 * when the driver supplies no static scan_el_attrs; the per-channel
 * attributes are then added into it dynamically.
 */
static struct attribute *iio_scan_el_dummy_attrs[] = {
	NULL
};

static struct attribute_group iio_scan_el_dummy_group = {
	.name = "scan_elements",
	.attrs = iio_scan_el_dummy_attrs
};
274
/*
 * Tear down everything under the scan_elements directory: first the
 * dynamically added per-channel attributes, then the group itself
 * (either the driver's static group or the dummy one).
 */
static void __iio_ring_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_ring_buffer *ring = indio_dev->ring;
	/* Must sample before the loop below empties the list. */
	int anydynamic = !list_empty(&ring->scan_el_dev_attr_list);
	list_for_each_entry_safe(p, n,
				 &ring->scan_el_dev_attr_list, l)
		iio_ring_remove_and_free_scan_dev_attr(indio_dev, p);

	if (ring->scan_el_attrs)
		sysfs_remove_group(&indio_dev->dev.kobj,
				   ring->scan_el_attrs);
	else if (anydynamic)
		sysfs_remove_group(&indio_dev->dev.kobj,
				   &iio_scan_el_dummy_group);
}
291
c009f7e4
JC
292int iio_ring_buffer_register(struct iio_dev *indio_dev,
293 const struct iio_chan_spec *channels,
294 int num_channels)
1d892719 295{
1aa04278 296 struct iio_ring_buffer *ring = indio_dev->ring;
1d892719 297 int ret, i;
758d988c 298
bf32963c 299 if (ring->scan_el_attrs) {
1aa04278 300 ret = sysfs_create_group(&indio_dev->dev.kobj,
bf32963c
MS
301 ring->scan_el_attrs);
302 if (ret) {
1aa04278 303 dev_err(&indio_dev->dev,
bf32963c 304 "Failed to add sysfs scan elements\n");
1aa04278 305 goto error_ret;
bf32963c 306 }
1d892719 307 } else if (channels) {
1aa04278 308 ret = sysfs_create_group(&indio_dev->dev.kobj,
1d892719
JC
309 &iio_scan_el_dummy_group);
310 if (ret)
1aa04278
JC
311 goto error_ret;
312 }
313 if (ring->attrs) {
314 ret = sysfs_create_group(&indio_dev->dev.kobj,
315 ring->attrs);
316 if (ret)
317 goto error_cleanup_dynamic;
bf32963c
MS
318 }
319
1d892719 320 INIT_LIST_HEAD(&ring->scan_el_dev_attr_list);
1d892719
JC
321 if (channels) {
322 /* new magic */
323 for (i = 0; i < num_channels; i++) {
32b5eeca
JC
324 /* Establish necessary mask length */
325 if (channels[i].scan_index >
326 (int)indio_dev->masklength - 1)
327 indio_dev->masklength
328 = indio_dev->channels[i].scan_index + 1;
329
1aa04278
JC
330 ret = iio_ring_add_channel_sysfs(indio_dev,
331 &channels[i]);
1d892719 332 if (ret < 0)
1aa04278 333 goto error_cleanup_group;
1d892719 334 }
32b5eeca
JC
335 if (indio_dev->masklength && ring->scan_mask == NULL) {
336 ring->scan_mask
337 = kzalloc(sizeof(*ring->scan_mask)*
338 BITS_TO_LONGS(indio_dev->masklength),
339 GFP_KERNEL);
340 if (ring->scan_mask == NULL) {
341 ret = -ENOMEM;
342 goto error_cleanup_group;
343 }
344 }
1d892719
JC
345 }
346
347 return 0;
1aa04278
JC
348error_cleanup_group:
349 if (ring->attrs)
350 sysfs_remove_group(&indio_dev->dev.kobj, ring->attrs);
1d892719 351error_cleanup_dynamic:
1aa04278 352 __iio_ring_attr_cleanup(indio_dev);
7026ea4b
JC
353error_ret:
354 return ret;
355}
c009f7e4 356EXPORT_SYMBOL(iio_ring_buffer_register);
1d892719 357
1aa04278 358void iio_ring_buffer_unregister(struct iio_dev *indio_dev)
7026ea4b 359{
32b5eeca 360 kfree(indio_dev->ring->scan_mask);
1aa04278
JC
361 if (indio_dev->ring->attrs)
362 sysfs_remove_group(&indio_dev->dev.kobj,
363 indio_dev->ring->attrs);
364 __iio_ring_attr_cleanup(indio_dev);
7026ea4b
JC
365}
366EXPORT_SYMBOL(iio_ring_buffer_unregister);
367
368ssize_t iio_read_ring_length(struct device *dev,
369 struct device_attribute *attr,
370 char *buf)
371{
1aa04278
JC
372 struct iio_dev *indio_dev = dev_get_drvdata(dev);
373 struct iio_ring_buffer *ring = indio_dev->ring;
7026ea4b 374
5565a450 375 if (ring->access->get_length)
8d213f24
JC
376 return sprintf(buf, "%d\n",
377 ring->access->get_length(ring));
7026ea4b 378
8d213f24 379 return 0;
7026ea4b
JC
380}
381EXPORT_SYMBOL(iio_read_ring_length);
382
0abd2428 383ssize_t iio_write_ring_length(struct device *dev,
7026ea4b
JC
384 struct device_attribute *attr,
385 const char *buf,
386 size_t len)
387{
388 int ret;
389 ulong val;
1aa04278
JC
390 struct iio_dev *indio_dev = dev_get_drvdata(dev);
391 struct iio_ring_buffer *ring = indio_dev->ring;
8d213f24 392
7026ea4b
JC
393 ret = strict_strtoul(buf, 10, &val);
394 if (ret)
395 return ret;
396
5565a450
JC
397 if (ring->access->get_length)
398 if (val == ring->access->get_length(ring))
7026ea4b
JC
399 return len;
400
5565a450
JC
401 if (ring->access->set_length) {
402 ring->access->set_length(ring, val);
403 if (ring->access->mark_param_change)
404 ring->access->mark_param_change(ring);
7026ea4b
JC
405 }
406
407 return len;
408}
409EXPORT_SYMBOL(iio_write_ring_length);
410
ffcab07a 411ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
7026ea4b
JC
412 struct device_attribute *attr,
413 char *buf)
414{
1aa04278
JC
415 struct iio_dev *indio_dev = dev_get_drvdata(dev);
416 struct iio_ring_buffer *ring = indio_dev->ring;
7026ea4b 417
5565a450 418 if (ring->access->get_bytes_per_datum)
8d213f24
JC
419 return sprintf(buf, "%d\n",
420 ring->access->get_bytes_per_datum(ring));
7026ea4b 421
8d213f24 422 return 0;
7026ea4b 423}
ffcab07a 424EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
7026ea4b
JC
425
/*
 * sysfs write: switch the ring on or off.
 *
 * Enable sequence: preenable -> request_update -> mark_in_use ->
 * select INDIO_RING_TRIGGERED or INDIO_RING_HARDWARE_BUFFER ->
 * postenable, unwinding everything on any failure.
 * Disable sequence: predisable -> unmark_in_use -> back to
 * INDIO_DIRECT_MODE -> postdisable.
 * The whole transition is serialised under mlock; requesting the
 * current state again is a logged no-op.
 */
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct iio_ring_buffer *ring = dev_info->ring;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->setup_ops->preenable) {
			ret = ring->setup_ops->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started:"
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		/* Let the buffer pick up any pending parameter changes. */
		if (ring->access->request_update) {
			ret = ring->access->request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access->mark_in_use)
			ring->access->mark_in_use(ring);
		/* Definitely possible for devices to support both of these.*/
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				/* Undo the mark_in_use taken above. */
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->setup_ops->postenable) {
			ret = ring->setup_ops->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "postenable failed\n");
				/* Roll back to the pre-enable state. */
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->setup_ops->postdisable)
					ring->setup_ops->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->setup_ops->predisable) {
			ret = ring->setup_ops->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access->unmark_in_use)
			ring->access->unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->setup_ops->postdisable) {
			ret = ring->setup_ops->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
8d213f24 522
7026ea4b
JC
523ssize_t iio_show_ring_enable(struct device *dev,
524 struct device_attribute *attr,
525 char *buf)
526{
1aa04278
JC
527 struct iio_dev *dev_info = dev_get_drvdata(dev);
528 return sprintf(buf, "%d\n", !!(dev_info->currentmode
7026ea4b
JC
529 & INDIO_ALL_RING_MODES));
530}
531EXPORT_SYMBOL(iio_show_ring_enable);
532
5565a450
JC
533int iio_sw_ring_preenable(struct iio_dev *indio_dev)
534{
535 struct iio_ring_buffer *ring = indio_dev->ring;
536 size_t size;
537 dev_dbg(&indio_dev->dev, "%s\n", __func__);
538 /* Check if there are any scan elements enabled, if not fail*/
539 if (!(ring->scan_count || ring->scan_timestamp))
540 return -EINVAL;
541 if (ring->scan_timestamp)
542 if (ring->scan_count)
543 /* Timestamp (aligned to s64) and data */
544 size = (((ring->scan_count * ring->bpe)
545 + sizeof(s64) - 1)
546 & ~(sizeof(s64) - 1))
547 + sizeof(s64);
548 else /* Timestamp only */
549 size = sizeof(s64);
550 else /* Data only */
551 size = ring->scan_count * ring->bpe;
552 ring->access->set_bytes_per_datum(ring, size);
553
554 return 0;
555}
556EXPORT_SYMBOL(iio_sw_ring_preenable);
32b5eeca
JC
557
558
559/* note NULL used as error indicator as it doesn't make sense. */
560static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
561 unsigned int masklength,
562 unsigned long *mask)
563{
564 if (bitmap_empty(mask, masklength))
565 return NULL;
566 while (*av_masks) {
567 if (bitmap_subset(mask, av_masks, masklength))
568 return av_masks;
569 av_masks += BITS_TO_LONGS(masklength);
570 }
571 return NULL;
572}
573
574/**
575 * iio_scan_mask_set() - set particular bit in the scan mask
576 * @ring: the ring buffer whose scan mask we are interested in
577 * @bit: the bit to be set.
578 **/
579int iio_scan_mask_set(struct iio_ring_buffer *ring, int bit)
580{
581 struct iio_dev *dev_info = ring->indio_dev;
582 unsigned long *mask;
583 unsigned long *trialmask;
584
585 trialmask = kmalloc(sizeof(*trialmask)*
586 BITS_TO_LONGS(dev_info->masklength),
587 GFP_KERNEL);
588
589 if (trialmask == NULL)
590 return -ENOMEM;
591 if (!dev_info->masklength) {
592 WARN_ON("trying to set scan mask prior to registering ring\n");
593 kfree(trialmask);
594 return -EINVAL;
595 }
596 bitmap_copy(trialmask, ring->scan_mask, dev_info->masklength);
597 set_bit(bit, trialmask);
598
599 if (dev_info->available_scan_masks) {
600 mask = iio_scan_mask_match(dev_info->available_scan_masks,
601 dev_info->masklength,
602 trialmask);
603 if (!mask) {
604 kfree(trialmask);
605 return -EINVAL;
606 }
607 }
608 bitmap_copy(ring->scan_mask, trialmask, dev_info->masklength);
609 ring->scan_count++;
610
611 kfree(trialmask);
612
613 return 0;
614};
615EXPORT_SYMBOL_GPL(iio_scan_mask_set);
616
617int iio_scan_mask_query(struct iio_ring_buffer *ring, int bit)
618{
619 struct iio_dev *dev_info = ring->indio_dev;
620 long *mask;
621
622 if (bit > dev_info->masklength)
623 return -EINVAL;
624
625 if (!ring->scan_mask)
626 return 0;
627 if (dev_info->available_scan_masks)
628 mask = iio_scan_mask_match(dev_info->available_scan_masks,
629 dev_info->masklength,
630 ring->scan_mask);
631 else
632 mask = ring->scan_mask;
633 if (!mask)
634 return 0;
635
636 return test_bit(bit, mask);
637};
638EXPORT_SYMBOL_GPL(iio_scan_mask_query);
This page took 0.24989 seconds and 5 git commands to generate.