drivers/s390/cio/device_status.c
/*
 * Copyright IBM Corp. 2002
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Status accumulation and basic sense functions.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"

/*
 * Check for any kind of channel or interface control check but don't
 * issue the message for the console device.
 */
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        char dbf_text[15];

        if (!scsw_is_valid_cstat(&irb->scsw) ||
            !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
              SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
                return;
        CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
                      "received"
                      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
                      ": %02X sch_stat : %02X\n",
                      cdev->private->dev_id.devno, sch->schid.ssid,
                      sch->schid.sch_no,
                      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
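        /* Record the check in the cio debug log, keyed by subchannel number. */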
        sprintf(dbf_text, "chk%x", sch->schid.sch_no);
        CIO_TRACE_EVENT(0, dbf_text);
        CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}

/*
 * Some paths are no longer operational (the pno bit in the scsw is set).
 */
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cio_update_schib(sch))
                goto doverify;

        CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
                      "not operational \n", __func__,
                      sch->schid.ssid, sch->schid.sch_no,
                      sch->schib.pmcw.pnom);

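        /* Drop the paths reported as not operational from the usable path mask. */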
        sch->lpm &= ~sch->schib.pmcw.pnom;
doverify:
        cdev->private->flags.doverify = 1;
}

/*
 * Copy valid bits from the extended control word to device irb.
 */
static void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
        /*
         * Copy the extended control bit only if it is valid... yes, there
         * are conditions that have to be met for the extended control
         * bit to have meaning. Sick.
         */
        cdev->private->irb.scsw.cmd.ectl = 0;
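        /* The ectl bit is meaningful only for alert status that is not also intermediate status. */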
        if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
            !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
                cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
        /* Check if extended control word is valid. */
        if (!cdev->private->irb.scsw.cmd.ectl)
                return;
        /* Copy concurrent sense / model dependent information. */
        memcpy(&cdev->private->irb.ecw, irb->ecw, sizeof(irb->ecw));
}

/*
 * Check if the extended status word is valid.
 */
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
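        /*
         * The esw carries no information if the eswf bit is not set for a
         * status-pending-only interrupt, or if intermediate status is
         * pending while the subchannel is not suspended.
         */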
        if (!irb->scsw.cmd.eswf &&
            (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
                return 0;
        if (irb->scsw.cmd.stctl ==
            (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
            !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
                return 0;
        return 1;
}

/*
 * Copy valid bits from the extended status word to device irb.
 */
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
        struct irb *cdev_irb;
        struct sublog *cdev_sublog, *sublog;

        if (!ccw_device_accumulate_esw_valid(irb))
                return;

        cdev_irb = &cdev->private->irb;

        /* Copy last path used mask. */
        cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;

        /* Copy subchannel logout information if esw is of format 0. */
        if (irb->scsw.cmd.eswf) {
                cdev_sublog = &cdev_irb->esw.esw0.sublog;
                sublog = &irb->esw.esw0.sublog;
                /* Copy extended status flags. */
                cdev_sublog->esf = sublog->esf;
                /*
                 * Copy fields that have a meaning for channel data check,
                 * channel control check and interface control check.
                 */
                if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
                                           SCHN_STAT_CHN_CTRL_CHK |
                                           SCHN_STAT_INTF_CTRL_CHK)) {
                        /* Copy ancillary report bit. */
                        cdev_sublog->arep = sublog->arep;
                        /* Copy field-validity-flags. */
                        cdev_sublog->fvf = sublog->fvf;
                        /* Copy storage access code. */
                        cdev_sublog->sacc = sublog->sacc;
                        /* Copy termination code. */
                        cdev_sublog->termc = sublog->termc;
                        /* Copy sequence code. */
                        cdev_sublog->seqc = sublog->seqc;
                }
                /* Copy device status check. */
                cdev_sublog->devsc = sublog->devsc;
                /* Copy secondary error. */
                cdev_sublog->serr = sublog->serr;
                /* Copy i/o-error alert. */
                cdev_sublog->ioerr = sublog->ioerr;
                /* Copy channel path timeout bit. */
                if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
                        cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
                /* Copy failing storage address validity flag. */
                cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
                if (cdev_irb->esw.esw0.erw.fsavf) {
                        /* ... and copy the failing storage address. */
                        memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
                               sizeof(irb->esw.esw0.faddr));
                        /* ... and copy the failing storage address format. */
                        cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
                }
                /* Copy secondary ccw address validity bit. */
                cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
                if (irb->esw.esw0.erw.scavf)
                        /* ... and copy the secondary ccw address. */
                        cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;

        }
        /* FIXME: DCTI for format 2? */

        /* Copy authorization bit. */
        cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
        /* Copy path verification required flag. */
        cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
        if (irb->esw.esw0.erw.pvrf)
                cdev->private->flags.doverify = 1;
        /* Copy concurrent sense bit. */
        cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
        if (irb->esw.esw0.erw.cons)
                cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}

/*
 * Accumulate status from irb to devstat.
 */
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
        struct irb *cdev_irb;

        /*
         * Check if the status pending bit is set in stctl.
         * If not, the remaining bits have no meaning and we must ignore them.
         * The esw is not meaningful either...
         */
        if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
                return;

        /* Check for channel checks and interface control checks. */
        ccw_device_msg_control_check(cdev, irb);

        /* Check for path not operational. */
        if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
                ccw_device_path_notoper(cdev);
        /* No irb accumulation for transport mode irbs. */
        if (scsw_is_tm(&irb->scsw)) {
                memcpy(&cdev->private->irb, irb, sizeof(struct irb));
                return;
        }
        /* Don't accumulate unsolicited interrupts. */
        if (!scsw_is_solicited(&irb->scsw))
                return;

        cdev_irb = &cdev->private->irb;

        /*
         * If the clear function had been performed, all formerly pending
         * status at the subchannel has been cleared and we must not pass
         * intermediate accumulated status to the device driver.
         */
        if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
                memset(&cdev->private->irb, 0, sizeof(struct irb));

        /* Copy bits which are valid only for the start function. */
        if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
                /* Copy key. */
                cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
                /* Copy suspend control bit. */
                cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
                /* Accumulate deferred condition code. */
                cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
                /* Copy ccw format bit. */
                cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
                /* Copy prefetch bit. */
                cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
                /* Copy initial-status-interruption-control. */
                cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
                /* Copy address limit checking control. */
                cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
                /* Copy suppress suspend bit. */
                cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
        }

        /* Take care of the extended control bit and extended control word. */
        ccw_device_accumulate_ecw(cdev, irb);

        /* Accumulate function control. */
        cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
        /* Copy activity control. */
        cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
        /* Accumulate status control. */
        cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
        /*
         * Copy ccw address if it is valid. This is a bit simplified
         * but should be close enough for all practical purposes.
         */
        if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
            ((irb->scsw.cmd.stctl ==
              (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
             (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
             (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
            (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
                cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
        /* Accumulate device status, but not the device busy flag. */
        cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
        /* dstat is not always valid. */
        if (irb->scsw.cmd.stctl &
            (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
             | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
                cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
        /* Accumulate subchannel status. */
        cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
        /* Copy residual count if it is valid. */
        if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
            (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
             == 0)
                cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;

        /* Take care of bits in the extended status word. */
        ccw_device_accumulate_esw(cdev, irb);

        /*
         * Check whether we must issue a SENSE CCW ourselves if there is no
         * concurrent sense facility installed for the subchannel.
         * No sense is required if no delayed sense is pending
         * and we did not get a unit check without sense information.
         *
         * Note: We should check for ioinfo[irq]->flags.consns but VM
         *       violates the ESA/390 architecture and doesn't present an
         *       operand exception for virtual devices without concurrent
         *       sense facility available/supported when enabling the
         *       concurrent sense facility.
         */
        if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
            !(cdev_irb->esw.esw0.erw.cons))
                cdev->private->flags.dosense = 1;
}

/*
 * Do a basic sense.
 */
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
        struct subchannel *sch;
        struct ccw1 *sense_ccw;
        int rc;

        sch = to_subchannel(cdev->dev.parent);

        /* A sense is required, can we do it now? */
        if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
                /*
                 * We received a unit check but have no final status yet,
                 * therefore we must delay the SENSE processing. We must not
                 * report this intermediate status to the device interrupt
                 * handler.
                 */
                return -EBUSY;

        /*
         * We have ending status but no sense information. Do a basic sense.
         */
        sense_ccw = &to_io_private(sch)->sense_ccw;
        sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
        sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
        sense_ccw->count = SENSE_MAX_COUNT;
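        /*
         * Suppress the incorrect length indication in case the device
         * provides less than SENSE_MAX_COUNT bytes of sense data.
         */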
        sense_ccw->flags = CCW_FLAG_SLI;

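        /* Start the basic sense I/O, allowing any channel path (lpm 0xff). */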
        rc = cio_start(sch, sense_ccw, 0xff);
        if (rc == -ENODEV || rc == -EACCES)
                dev_fsm_event(cdev, DEV_EVENT_VERIFY);
        return rc;
}

/*
 * Add information from basic sense to devstat.
 */
void
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
{
        /*
         * Check if the status pending bit is set in stctl.
         * If not, the remaining bits have no meaning and we must ignore them.
         * The esw is not meaningful either...
         */
        if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
                return;

        /* Check for channel checks and interface control checks. */
        ccw_device_msg_control_check(cdev, irb);

        /* Check for path not operational. */
        if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
                ccw_device_path_notoper(cdev);

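        /*
         * Channel end without a new unit check means the basic sense has
         * completed; mark the sense data as available and stop sensing.
         */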
        if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
            (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
                cdev->private->irb.esw.esw0.erw.cons = 1;
                cdev->private->flags.dosense = 0;
        }
        /* Check if path verification is required. */
        if (ccw_device_accumulate_esw_valid(irb) &&
            irb->esw.esw0.erw.pvrf)
                cdev->private->flags.doverify = 1;
}

/*
 * This function accumulates the status into the private devstat and
 * starts a basic sense if one is needed.
 */
int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
        ccw_device_accumulate_irb(cdev, irb);
        if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
                return -EBUSY;
        /* Check for basic sense. */
        if (cdev->private->flags.dosense &&
            !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
                cdev->private->irb.esw.esw0.erw.cons = 1;
                cdev->private->flags.dosense = 0;
                return 0;
        }
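        /*
         * A basic sense is still needed; start it and return -EBUSY so the
         * interrupt is not reported to the device driver before the sense
         * data has arrived.
         */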
        if (cdev->private->flags.dosense) {
                ccw_device_do_sense(cdev, irb);
                return -EBUSY;
        }
        return 0;
}