/*
 * PowerNV OPAL Dump Interface
 *
 * Copyright 2013,2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #include <linux/kobject.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/pagemap.h>
17 #include <linux/delay.h>
21 #define DUMP_TYPE_FSP 0x01
25 struct bin_attribute dump_attr
;
26 uint32_t id
; /* becomes object name */
31 #define to_dump_obj(x) container_of(x, struct dump_obj, kobj)
33 struct dump_attribute
{
34 struct attribute attr
;
35 ssize_t (*show
)(struct dump_obj
*dump
, struct dump_attribute
*attr
,
37 ssize_t (*store
)(struct dump_obj
*dump
, struct dump_attribute
*attr
,
38 const char *buf
, size_t count
);
40 #define to_dump_attr(x) container_of(x, struct dump_attribute, attr)
42 static ssize_t
dump_id_show(struct dump_obj
*dump_obj
,
43 struct dump_attribute
*attr
,
46 return sprintf(buf
, "0x%x\n", dump_obj
->id
);
49 static const char* dump_type_to_string(uint32_t type
)
52 case 0x01: return "SP Dump";
53 case 0x02: return "System/Platform Dump";
54 case 0x03: return "SMA Dump";
55 default: return "unknown";
59 static ssize_t
dump_type_show(struct dump_obj
*dump_obj
,
60 struct dump_attribute
*attr
,
64 return sprintf(buf
, "0x%x %s\n", dump_obj
->type
,
65 dump_type_to_string(dump_obj
->type
));
68 static ssize_t
dump_ack_show(struct dump_obj
*dump_obj
,
69 struct dump_attribute
*attr
,
72 return sprintf(buf
, "ack - acknowledge dump\n");
76 * Send acknowledgement to OPAL
78 static int64_t dump_send_ack(uint32_t dump_id
)
82 rc
= opal_dump_ack(dump_id
);
84 pr_warn("%s: Failed to send ack to Dump ID 0x%x (%d)\n",
85 __func__
, dump_id
, rc
);
/*
 * Deferred kobject_put() callback — a sysfs attribute cannot drop the
 * final reference on its own kobject from within its handler, so the
 * put is scheduled via sysfs_schedule_callback() instead.
 */
static void delay_release_kobj(void *kobj)
{
	kobject_put((struct kobject *)kobj);
}
94 static ssize_t
dump_ack_store(struct dump_obj
*dump_obj
,
95 struct dump_attribute
*attr
,
99 dump_send_ack(dump_obj
->id
);
100 sysfs_schedule_callback(&dump_obj
->kobj
, delay_release_kobj
,
101 &dump_obj
->kobj
, THIS_MODULE
);
105 /* Attributes of a dump
106 * The binary attribute of the dump itself is dynamic
107 * due to the dynamic size of the dump
109 static struct dump_attribute id_attribute
=
110 __ATTR(id
, 0666, dump_id_show
, NULL
);
111 static struct dump_attribute type_attribute
=
112 __ATTR(type
, 0666, dump_type_show
, NULL
);
113 static struct dump_attribute ack_attribute
=
114 __ATTR(acknowledge
, 0660, dump_ack_show
, dump_ack_store
);
116 static ssize_t
init_dump_show(struct dump_obj
*dump_obj
,
117 struct dump_attribute
*attr
,
120 return sprintf(buf
, "1 - initiate dump\n");
123 static int64_t dump_fips_init(uint8_t type
)
127 rc
= opal_dump_init(type
);
129 pr_warn("%s: Failed to initiate FipS dump (%d)\n",
134 static ssize_t
init_dump_store(struct dump_obj
*dump_obj
,
135 struct dump_attribute
*attr
,
139 dump_fips_init(DUMP_TYPE_FSP
);
140 pr_info("%s: Initiated FSP dump\n", __func__
);
144 static struct dump_attribute initiate_attribute
=
145 __ATTR(initiate_dump
, 0600, init_dump_show
, init_dump_store
);
147 static struct attribute
*initiate_attrs
[] = {
148 &initiate_attribute
.attr
,
152 static struct attribute_group initiate_attr_group
= {
153 .attrs
= initiate_attrs
,
156 static struct kset
*dump_kset
;
158 static ssize_t
dump_attr_show(struct kobject
*kobj
,
159 struct attribute
*attr
,
162 struct dump_attribute
*attribute
;
163 struct dump_obj
*dump
;
165 attribute
= to_dump_attr(attr
);
166 dump
= to_dump_obj(kobj
);
168 if (!attribute
->show
)
171 return attribute
->show(dump
, attribute
, buf
);
174 static ssize_t
dump_attr_store(struct kobject
*kobj
,
175 struct attribute
*attr
,
176 const char *buf
, size_t len
)
178 struct dump_attribute
*attribute
;
179 struct dump_obj
*dump
;
181 attribute
= to_dump_attr(attr
);
182 dump
= to_dump_obj(kobj
);
184 if (!attribute
->store
)
187 return attribute
->store(dump
, attribute
, buf
, len
);
190 static const struct sysfs_ops dump_sysfs_ops
= {
191 .show
= dump_attr_show
,
192 .store
= dump_attr_store
,
195 static void dump_release(struct kobject
*kobj
)
197 struct dump_obj
*dump
;
199 dump
= to_dump_obj(kobj
);
204 static struct attribute
*dump_default_attrs
[] = {
206 &type_attribute
.attr
,
211 static struct kobj_type dump_ktype
= {
212 .sysfs_ops
= &dump_sysfs_ops
,
213 .release
= &dump_release
,
214 .default_attrs
= dump_default_attrs
,
217 static void free_dump_sg_list(struct opal_sg_list
*list
)
219 struct opal_sg_list
*sg1
;
228 static struct opal_sg_list
*dump_data_to_sglist(struct dump_obj
*dump
)
230 struct opal_sg_list
*sg1
, *list
= NULL
;
237 sg1
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
242 sg1
->num_entries
= 0;
244 /* Translate virtual address to physical address */
245 sg1
->entry
[sg1
->num_entries
].data
=
246 (void *)(vmalloc_to_pfn(addr
) << PAGE_SHIFT
);
248 if (size
> PAGE_SIZE
)
249 sg1
->entry
[sg1
->num_entries
].length
= PAGE_SIZE
;
251 sg1
->entry
[sg1
->num_entries
].length
= size
;
254 if (sg1
->num_entries
>= SG_ENTRIES_PER_NODE
) {
255 sg1
->next
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
260 sg1
->num_entries
= 0;
268 pr_err("%s : Failed to allocate memory\n", __func__
);
269 free_dump_sg_list(list
);
273 static void sglist_to_phy_addr(struct opal_sg_list
*list
)
275 struct opal_sg_list
*sg
, *next
;
277 for (sg
= list
; sg
; sg
= next
) {
279 /* Don't translate NULL pointer for last entry */
281 sg
->next
= (struct opal_sg_list
*)__pa(sg
->next
);
285 /* Convert num_entries to length */
287 sg
->num_entries
* sizeof(struct opal_sg_entry
) + 16;
291 static int64_t dump_read_info(uint32_t *id
, uint32_t *size
, uint32_t *type
)
296 rc
= opal_dump_info2(id
, size
, type
);
298 if (rc
== OPAL_PARAMETER
)
299 rc
= opal_dump_info(id
, size
);
302 pr_warn("%s: Failed to get dump info (%d)\n",
307 static int64_t dump_read_data(struct dump_obj
*dump
)
309 struct opal_sg_list
*list
;
313 /* Allocate memory */
314 dump
->buffer
= vzalloc(PAGE_ALIGN(dump
->size
));
316 pr_err("%s : Failed to allocate memory\n", __func__
);
321 /* Generate SG list */
322 list
= dump_data_to_sglist(dump
);
328 /* Translate sg list addr to real address */
329 sglist_to_phy_addr(list
);
331 /* First entry address */
335 rc
= OPAL_BUSY_EVENT
;
336 while (rc
== OPAL_BUSY
|| rc
== OPAL_BUSY_EVENT
) {
337 rc
= opal_dump_read(dump
->id
, addr
);
338 if (rc
== OPAL_BUSY_EVENT
) {
339 opal_poll_events(NULL
);
344 if (rc
!= OPAL_SUCCESS
&& rc
!= OPAL_PARTIAL
)
345 pr_warn("%s: Extract dump failed for ID 0x%x\n",
349 free_dump_sg_list(list
);
355 static ssize_t
dump_attr_read(struct file
*filep
, struct kobject
*kobj
,
356 struct bin_attribute
*bin_attr
,
357 char *buffer
, loff_t pos
, size_t count
)
361 struct dump_obj
*dump
= to_dump_obj(kobj
);
364 rc
= dump_read_data(dump
);
366 if (rc
!= OPAL_SUCCESS
&& rc
!= OPAL_PARTIAL
) {
372 if (rc
== OPAL_PARTIAL
) {
373 /* On a partial read, we just return EIO
374 * and rely on userspace to ask us to try
377 pr_info("%s: Platform dump partially read.ID = 0x%x\n",
383 memcpy(buffer
, dump
->buffer
+ pos
, count
);
385 /* You may think we could free the dump buffer now and retrieve
386 * it again later if needed, but due to current firmware limitation,
387 * that's not the case. So, once read into userspace once,
388 * we keep the dump around until it's acknowledged by userspace.
394 static struct dump_obj
*create_dump_obj(uint32_t id
, size_t size
,
397 struct dump_obj
*dump
;
400 dump
= kzalloc(sizeof(*dump
), GFP_KERNEL
);
404 dump
->kobj
.kset
= dump_kset
;
406 kobject_init(&dump
->kobj
, &dump_ktype
);
408 sysfs_bin_attr_init(&dump
->dump_attr
);
410 dump
->dump_attr
.attr
.name
= "dump";
411 dump
->dump_attr
.attr
.mode
= 0400;
412 dump
->dump_attr
.size
= size
;
413 dump
->dump_attr
.read
= dump_attr_read
;
419 rc
= kobject_add(&dump
->kobj
, NULL
, "0x%x-0x%x", type
, id
);
421 kobject_put(&dump
->kobj
);
425 rc
= sysfs_create_bin_file(&dump
->kobj
, &dump
->dump_attr
);
427 kobject_put(&dump
->kobj
);
431 pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
432 __func__
, dump
->id
, dump
->size
);
434 kobject_uevent(&dump
->kobj
, KOBJ_ADD
);
439 static int process_dump(void)
442 uint32_t dump_id
, dump_size
, dump_type
;
443 struct dump_obj
*dump
;
446 rc
= dump_read_info(&dump_id
, &dump_size
, &dump_type
);
447 if (rc
!= OPAL_SUCCESS
)
450 sprintf(name
, "0x%x-0x%x", dump_type
, dump_id
);
452 /* we may get notified twice, let's handle
453 * that gracefully and not create two conflicting
456 if (kset_find_obj(dump_kset
, name
))
459 dump
= create_dump_obj(dump_id
, dump_size
, dump_type
);
/* Workqueue body: process the pending dump outside interrupt context. */
static void dump_work_fn(struct work_struct *work)
{
	process_dump();
}

static DECLARE_WORK(dump_work, dump_work_fn);
473 static void schedule_process_dump(void)
475 schedule_work(&dump_work
);
479 * New dump available notification
481 * Once we get notification, we add sysfs entries for it.
482 * We only fetch the dump on demand, and create sysfs asynchronously.
484 static int dump_event(struct notifier_block
*nb
,
485 unsigned long events
, void *change
)
487 if (events
& OPAL_EVENT_DUMP_AVAIL
)
488 schedule_process_dump();
493 static struct notifier_block dump_nb
= {
494 .notifier_call
= dump_event
,
499 void __init
opal_platform_dump_init(void)
503 dump_kset
= kset_create_and_add("dump", NULL
, opal_kobj
);
505 pr_warn("%s: Failed to create dump kset\n", __func__
);
509 rc
= sysfs_create_group(&dump_kset
->kobj
, &initiate_attr_group
);
511 pr_warn("%s: Failed to create initiate dump attr group\n",
513 kobject_put(&dump_kset
->kobj
);
517 rc
= opal_notifier_register(&dump_nb
);
519 pr_warn("%s: Can't register OPAL event notifier (%d)\n",
524 opal_dump_resend_notification();