[IA64-SGI] fixes for XPC disengage and open/close protocol
arch/ia64/sn/kernel/xpc_channel.c
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition Communication (XPC) channel support.
12 *
13 * This is the part of XPC that manages the channels and
14 * sends/receives messages across them to/from other partitions.
15 *
16 */
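/*
 * Rough usage sketch (the consumer-facing declarations live in xp.h; only
 * the XPC side is shown in this file):
 *
 *   - a consumer registers a channel with xpc_connect(), which leads XP to
 *     call xpc_initiate_connect() below for every active partition;
 *   - once a connection is established the registered callback is invoked
 *     with reason xpcConnected (see xpc_connected_callout());
 *   - messages are then obtained with xpc_initiate_allocate() and sent with
 *     xpc_initiate_send() or xpc_initiate_send_notify();
 *   - unregistration goes through xpc_initiate_disconnect(), after which the
 *     callback sees reason xpcDisconnecting.
 */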
17
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/cache.h>
23#include <linux/interrupt.h>
24#include <linux/slab.h>
25#include <asm/sn/bte.h>
26#include <asm/sn/sn_sal.h>
27#include "xpc.h"
28
29
30/*
31 * Set up the initial values for the XPartition Communication channels.
32 */
33static void
34xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
35{
36 int ch_number;
37 struct xpc_channel *ch;
38
39
40 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
41 ch = &part->channels[ch_number];
42
43 ch->partid = partid;
44 ch->number = ch_number;
45 ch->flags = XPC_C_DISCONNECTED;
46
47 ch->local_GP = &part->local_GPs[ch_number];
48 ch->local_openclose_args =
49 &part->local_openclose_args[ch_number];
50
51 atomic_set(&ch->kthreads_assigned, 0);
52 atomic_set(&ch->kthreads_idle, 0);
53 atomic_set(&ch->kthreads_active, 0);
54
55 atomic_set(&ch->references, 0);
56 atomic_set(&ch->n_to_notify, 0);
57
58 spin_lock_init(&ch->lock);
59 sema_init(&ch->msg_to_pull_sema, 1); /* mutex */
a607c389 60 sema_init(&ch->wdisconnect_sema, 0); /* event wait */
61
62 atomic_set(&ch->n_on_msg_allocate_wq, 0);
63 init_waitqueue_head(&ch->msg_allocate_wq);
64 init_waitqueue_head(&ch->idle_wq);
65 }
66}
67
68
69/*
70 * Setup the infrastructure necessary to support XPartition Communication
71 * between the specified remote partition and the local one.
72 */
73enum xpc_retval
74xpc_setup_infrastructure(struct xpc_partition *part)
75{
59a0a8aa 76 int ret, cpuid;
77 struct timer_list *timer;
78 partid_t partid = XPC_PARTID(part);
79
80
81 /*
82 * Zero out MOST of the entry for this partition. Only the fields
83 * starting with `nchannels' will be zeroed. The preceding fields must
84 * remain `viable' across partition ups and downs, since they may be
85 * referenced during this memset() operation.
86 */
87 memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
88 offsetof(struct xpc_partition, nchannels));
89
90 /*
91 * Allocate all of the channel structures as a contiguous chunk of
92 * memory.
93 */
94 part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
95 GFP_KERNEL);
96 if (part->channels == NULL) {
97 dev_err(xpc_chan, "can't get memory for channels\n");
98 return xpcNoMemory;
99 }
100 memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
101
102 part->nchannels = XPC_NCHANNELS;
103
104
105 /* allocate all the required GET/PUT values */
106
107 part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
108 GFP_KERNEL, &part->local_GPs_base);
109 if (part->local_GPs == NULL) {
110 kfree(part->channels);
111 part->channels = NULL;
112 dev_err(xpc_chan, "can't get memory for local get/put "
113 "values\n");
114 return xpcNoMemory;
115 }
116 memset(part->local_GPs, 0, XPC_GP_SIZE);
117
118 part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
119 GFP_KERNEL, &part->remote_GPs_base);
120 if (part->remote_GPs == NULL) {
121 kfree(part->channels);
122 part->channels = NULL;
123 kfree(part->local_GPs_base);
124 part->local_GPs = NULL;
125 dev_err(xpc_chan, "can't get memory for remote get/put "
126 "values\n");
127 return xpcNoMemory;
128 }
129 memset(part->remote_GPs, 0, XPC_GP_SIZE);
130
131
132 /* allocate all the required open and close args */
133
134 part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
135 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
136 &part->local_openclose_args_base);
137 if (part->local_openclose_args == NULL) {
138 kfree(part->channels);
139 part->channels = NULL;
140 kfree(part->local_GPs_base);
141 part->local_GPs = NULL;
142 kfree(part->remote_GPs_base);
143 part->remote_GPs = NULL;
144 dev_err(xpc_chan, "can't get memory for local connect args\n");
145 return xpcNoMemory;
146 }
147 memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
148
149 part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
150 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
151 &part->remote_openclose_args_base);
152 if (part->remote_openclose_args == NULL) {
153 kfree(part->channels);
154 part->channels = NULL;
155 kfree(part->local_GPs_base);
156 part->local_GPs = NULL;
157 kfree(part->remote_GPs_base);
158 part->remote_GPs = NULL;
159 kfree(part->local_openclose_args_base);
160 part->local_openclose_args = NULL;
161 dev_err(xpc_chan, "can't get memory for remote connect args\n");
162 return xpcNoMemory;
163 }
164 memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
165
166
167 xpc_initialize_channels(part, partid);
168
169 atomic_set(&part->nchannels_active, 0);
a607c389 170 atomic_set(&part->nchannels_engaged, 0);
171
172
 173	/* local_IPI_amo was set to 0 by an earlier memset() */
174
 175	/* Initialize this partition's AMO_t structure */
176 part->local_IPI_amo_va = xpc_IPI_init(partid);
177
178 spin_lock_init(&part->IPI_lock);
179
180 atomic_set(&part->channel_mgr_requests, 1);
181 init_waitqueue_head(&part->channel_mgr_wq);
182
183 sprintf(part->IPI_owner, "xpc%02d", partid);
184 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
185 part->IPI_owner, (void *) (u64) partid);
186 if (ret != 0) {
187 kfree(part->channels);
188 part->channels = NULL;
189 kfree(part->local_GPs_base);
190 part->local_GPs = NULL;
191 kfree(part->remote_GPs_base);
192 part->remote_GPs = NULL;
193 kfree(part->local_openclose_args_base);
194 part->local_openclose_args = NULL;
195 kfree(part->remote_openclose_args_base);
196 part->remote_openclose_args = NULL;
197 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
198 "errno=%d\n", -ret);
199 return xpcLackOfResources;
200 }
201
202 /* Setup a timer to check for dropped IPIs */
203 timer = &part->dropped_IPI_timer;
204 init_timer(timer);
205 timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
206 timer->data = (unsigned long) part;
207 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
208 add_timer(timer);
209
210 /*
211 * With the setting of the partition setup_state to XPC_P_SETUP, we're
212 * declaring that this partition is ready to go.
213 */
821fe947 214 part->setup_state = XPC_P_SETUP;
215
216
217 /*
218 * Setup the per partition specific variables required by the
219 * remote partition to establish channel connections with us.
220 *
221 * The setting of the magic # indicates that these per partition
222 * specific variables are ready to be used.
223 */
224 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
225 xpc_vars_part[partid].openclose_args_pa =
226 __pa(part->local_openclose_args);
227 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
228 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
229 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
230 xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
89eb8eb9 231 xpc_vars_part[partid].nchannels = part->nchannels;
821fe947 232 xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
233
234 return xpcSuccess;
235}
236
237
238/*
239 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
240 * (or multiple cachelines) from a remote partition.
241 *
242 * src must be a cacheline aligned physical address on the remote partition.
243 * dst must be a cacheline aligned virtual address on this partition.
 244 * cnt must be a multiple of the cacheline size.
245 */
246static enum xpc_retval
247xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
248 const void *src, size_t cnt)
249{
250 bte_result_t bte_ret;
251
252
253 DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
254 DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
255 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
256
257 if (part->act_state == XPC_P_DEACTIVATING) {
258 return part->reason;
259 }
260
261 bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
262 (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
263 if (bte_ret == BTE_SUCCESS) {
264 return xpcSuccess;
265 }
266
267 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
268 XPC_PARTID(part), bte_ret);
269
270 return xpc_map_bte_errors(bte_ret);
271}
272
273
274/*
 275 * Pull the remote per partition specific variables from the specified
276 * partition.
277 */
278enum xpc_retval
279xpc_pull_remote_vars_part(struct xpc_partition *part)
280{
281 u8 buffer[L1_CACHE_BYTES * 2];
282 struct xpc_vars_part *pulled_entry_cacheline =
283 (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
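	/*
	 * Note: buffer is two cachelines long so that L1_CACHE_ALIGN() above
	 * is guaranteed to find a fully cacheline-aligned cacheline within it
	 * for the BTE pull below.
	 */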
284 struct xpc_vars_part *pulled_entry;
285 u64 remote_entry_cacheline_pa, remote_entry_pa;
286 partid_t partid = XPC_PARTID(part);
287 enum xpc_retval ret;
288
289
290 /* pull the cacheline that contains the variables we're interested in */
291
292 DBUG_ON(part->remote_vars_part_pa !=
293 L1_CACHE_ALIGN(part->remote_vars_part_pa));
294 DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
295
296 remote_entry_pa = part->remote_vars_part_pa +
297 sn_partition_id * sizeof(struct xpc_vars_part);
298
299 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
300
301 pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
302 (remote_entry_pa & (L1_CACHE_BYTES - 1)));
303
304 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
305 (void *) remote_entry_cacheline_pa,
306 L1_CACHE_BYTES);
307 if (ret != xpcSuccess) {
308 dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
309 "partition %d, ret=%d\n", partid, ret);
310 return ret;
311 }
312
313
314 /* see if they've been set up yet */
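	/*
	 * XPC_VP_MAGIC1 means the remote partition has published its
	 * vars_part entry; XPC_VP_MAGIC2 means it has additionally pulled
	 * ours (we make the matching MAGIC2 transition further down once we
	 * have validated and cached its variables).
	 */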
315
316 if (pulled_entry->magic != XPC_VP_MAGIC1 &&
317 pulled_entry->magic != XPC_VP_MAGIC2) {
318
319 if (pulled_entry->magic != 0) {
320 dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
321 "partition %d has bad magic value (=0x%lx)\n",
322 partid, sn_partition_id, pulled_entry->magic);
323 return xpcBadMagic;
324 }
325
326 /* they've not been initialized yet */
327 return xpcRetry;
328 }
329
330 if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
331
332 /* validate the variables */
333
334 if (pulled_entry->GPs_pa == 0 ||
335 pulled_entry->openclose_args_pa == 0 ||
336 pulled_entry->IPI_amo_pa == 0) {
337
338 dev_err(xpc_chan, "partition %d's XPC vars_part for "
339 "partition %d are not valid\n", partid,
340 sn_partition_id);
341 return xpcInvalidAddress;
342 }
343
344 /* the variables we imported look to be valid */
345
346 part->remote_GPs_pa = pulled_entry->GPs_pa;
347 part->remote_openclose_args_pa =
348 pulled_entry->openclose_args_pa;
349 part->remote_IPI_amo_va =
350 (AMO_t *) __va(pulled_entry->IPI_amo_pa);
351 part->remote_IPI_nasid = pulled_entry->IPI_nasid;
352 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
353
354 if (part->nchannels > pulled_entry->nchannels) {
355 part->nchannels = pulled_entry->nchannels;
356 }
357
358 /* let the other side know that we've pulled their variables */
359
821fe947 360 xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
361 }
362
363 if (pulled_entry->magic == XPC_VP_MAGIC1) {
364 return xpcRetry;
365 }
366
367 return xpcSuccess;
368}
369
370
371/*
372 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
373 */
374static u64
375xpc_get_IPI_flags(struct xpc_partition *part)
376{
377 unsigned long irq_flags;
378 u64 IPI_amo;
379 enum xpc_retval ret;
380
381
382 /*
383 * See if there are any IPI flags to be handled.
384 */
385
386 spin_lock_irqsave(&part->IPI_lock, irq_flags);
387 if ((IPI_amo = part->local_IPI_amo) != 0) {
388 part->local_IPI_amo = 0;
389 }
390 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
391
392
393 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
394 ret = xpc_pull_remote_cachelines(part,
395 part->remote_openclose_args,
396 (void *) part->remote_openclose_args_pa,
397 XPC_OPENCLOSE_ARGS_SIZE);
398 if (ret != xpcSuccess) {
399 XPC_DEACTIVATE_PARTITION(part, ret);
400
401 dev_dbg(xpc_chan, "failed to pull openclose args from "
402 "partition %d, ret=%d\n", XPC_PARTID(part),
403 ret);
404
405 /* don't bother processing IPIs anymore */
406 IPI_amo = 0;
407 }
408 }
409
410 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
411 ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
412 (void *) part->remote_GPs_pa,
413 XPC_GP_SIZE);
414 if (ret != xpcSuccess) {
415 XPC_DEACTIVATE_PARTITION(part, ret);
416
417 dev_dbg(xpc_chan, "failed to pull GPs from partition "
418 "%d, ret=%d\n", XPC_PARTID(part), ret);
419
420 /* don't bother processing IPIs anymore */
421 IPI_amo = 0;
422 }
423 }
424
425 return IPI_amo;
426}
427
428
429/*
430 * Allocate the local message queue and the notify queue.
431 */
432static enum xpc_retval
433xpc_allocate_local_msgqueue(struct xpc_channel *ch)
434{
435 unsigned long irq_flags;
436 int nentries;
437 size_t nbytes;
438
439
440 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
441 // >>> iterations of the for-loop, bail if set?
442
 443	// >>> should we impose a minimum # of entries? like 4 or 8?
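	/*
	 * Try for the full local_nentries first; if that allocation fails,
	 * retry with progressively fewer entries until one fits (the agreed
	 * size is then reflected back into ch->local_nentries below).
	 */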
444 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
445
446 nbytes = nentries * ch->msg_size;
447 ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
448 (GFP_KERNEL | GFP_DMA),
449 &ch->local_msgqueue_base);
450 if (ch->local_msgqueue == NULL) {
451 continue;
452 }
453 memset(ch->local_msgqueue, 0, nbytes);
454
455 nbytes = nentries * sizeof(struct xpc_notify);
456 ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA));
457 if (ch->notify_queue == NULL) {
458 kfree(ch->local_msgqueue_base);
459 ch->local_msgqueue = NULL;
460 continue;
461 }
462 memset(ch->notify_queue, 0, nbytes);
463
464 spin_lock_irqsave(&ch->lock, irq_flags);
465 if (nentries < ch->local_nentries) {
466 dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
467 "partid=%d, channel=%d\n", nentries,
468 ch->local_nentries, ch->partid, ch->number);
469
470 ch->local_nentries = nentries;
471 }
472 spin_unlock_irqrestore(&ch->lock, irq_flags);
473 return xpcSuccess;
474 }
475
476 dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
477 "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
478 return xpcNoMemory;
479}
480
481
482/*
483 * Allocate the cached remote message queue.
484 */
485static enum xpc_retval
486xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
487{
488 unsigned long irq_flags;
489 int nentries;
490 size_t nbytes;
491
492
493 DBUG_ON(ch->remote_nentries <= 0);
494
495 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
496 // >>> iterations of the for-loop, bail if set?
497
 498	// >>> should we impose a minimum # of entries? like 4 or 8?
499 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
500
501 nbytes = nentries * ch->msg_size;
502 ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
503 (GFP_KERNEL | GFP_DMA),
504 &ch->remote_msgqueue_base);
505 if (ch->remote_msgqueue == NULL) {
506 continue;
507 }
508 memset(ch->remote_msgqueue, 0, nbytes);
509
510 spin_lock_irqsave(&ch->lock, irq_flags);
511 if (nentries < ch->remote_nentries) {
512 dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
513 "partid=%d, channel=%d\n", nentries,
514 ch->remote_nentries, ch->partid, ch->number);
515
516 ch->remote_nentries = nentries;
517 }
518 spin_unlock_irqrestore(&ch->lock, irq_flags);
519 return xpcSuccess;
520 }
521
522 dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
523 "partid=%d, channel=%d\n", ch->partid, ch->number);
524 return xpcNoMemory;
525}
526
527
528/*
529 * Allocate message queues and other stuff associated with a channel.
530 *
531 * Note: Assumes all of the channel sizes are filled in.
532 */
533static enum xpc_retval
534xpc_allocate_msgqueues(struct xpc_channel *ch)
535{
536 unsigned long irq_flags;
537 int i;
538 enum xpc_retval ret;
539
540
541 DBUG_ON(ch->flags & XPC_C_SETUP);
542
543 if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
544 return ret;
545 }
546
547 if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
548 kfree(ch->local_msgqueue_base);
549 ch->local_msgqueue = NULL;
550 kfree(ch->notify_queue);
551 ch->notify_queue = NULL;
552 return ret;
553 }
554
555 for (i = 0; i < ch->local_nentries; i++) {
556 /* use a semaphore as an event wait queue */
557 sema_init(&ch->notify_queue[i].sema, 0);
558 }
559
560 spin_lock_irqsave(&ch->lock, irq_flags);
561 ch->flags |= XPC_C_SETUP;
562 spin_unlock_irqrestore(&ch->lock, irq_flags);
563
564 return xpcSuccess;
565}
566
567
568/*
569 * Process a connect message from a remote partition.
570 *
571 * Note: xpc_process_connect() is expecting to be called with the
572 * spin_lock_irqsave held and will leave it locked upon return.
573 */
574static void
575xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
576{
577 enum xpc_retval ret;
578
579
580 DBUG_ON(!spin_is_locked(&ch->lock));
581
582 if (!(ch->flags & XPC_C_OPENREQUEST) ||
583 !(ch->flags & XPC_C_ROPENREQUEST)) {
584 /* nothing more to do for now */
585 return;
586 }
587 DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
588
589 if (!(ch->flags & XPC_C_SETUP)) {
590 spin_unlock_irqrestore(&ch->lock, *irq_flags);
591 ret = xpc_allocate_msgqueues(ch);
592 spin_lock_irqsave(&ch->lock, *irq_flags);
593
594 if (ret != xpcSuccess) {
595 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
596 }
597 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
598 return;
599 }
600
601 DBUG_ON(!(ch->flags & XPC_C_SETUP));
602 DBUG_ON(ch->local_msgqueue == NULL);
603 DBUG_ON(ch->remote_msgqueue == NULL);
604 }
605
606 if (!(ch->flags & XPC_C_OPENREPLY)) {
607 ch->flags |= XPC_C_OPENREPLY;
608 xpc_IPI_send_openreply(ch, irq_flags);
609 }
610
611 if (!(ch->flags & XPC_C_ROPENREPLY)) {
612 return;
613 }
614
615 DBUG_ON(ch->remote_msgqueue_pa == 0);
616
617 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
618
619 dev_info(xpc_chan, "channel %d to partition %d connected\n",
620 ch->number, ch->partid);
621
622 spin_unlock_irqrestore(&ch->lock, *irq_flags);
623 xpc_create_kthreads(ch, 1);
624 spin_lock_irqsave(&ch->lock, *irq_flags);
625}
626
627
628/*
629 * Notify those who wanted to be notified upon delivery of their message.
630 */
631static void
632xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
633{
634 struct xpc_notify *notify;
635 u8 notify_type;
636 s64 get = ch->w_remote_GP.get - 1;
637
638
639 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
640
641 notify = &ch->notify_queue[get % ch->local_nentries];
642
643 /*
644 * See if the notify entry indicates it was associated with
 645		 * a message whose sender wants to be notified. It is possible
646 * that it is, but someone else is doing or has done the
647 * notification.
648 */
649 notify_type = notify->type;
650 if (notify_type == 0 ||
651 cmpxchg(&notify->type, notify_type, 0) !=
652 notify_type) {
653 continue;
654 }
655
656 DBUG_ON(notify_type != XPC_N_CALL);
657
658 atomic_dec(&ch->n_to_notify);
659
660 if (notify->func != NULL) {
661 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
662 "msg_number=%ld, partid=%d, channel=%d\n",
663 (void *) notify, get, ch->partid, ch->number);
664
665 notify->func(reason, ch->partid, ch->number,
666 notify->key);
667
668 dev_dbg(xpc_chan, "notify->func() returned, "
669 "notify=0x%p, msg_number=%ld, partid=%d, "
670 "channel=%d\n", (void *) notify, get,
671 ch->partid, ch->number);
672 }
673 }
674}
675
676
677/*
678 * Free up message queues and other stuff that were allocated for the specified
679 * channel.
680 *
681 * Note: ch->reason and ch->reason_line are left set for debugging purposes,
682 * they're cleared when XPC_C_DISCONNECTED is cleared.
683 */
684static void
685xpc_free_msgqueues(struct xpc_channel *ch)
686{
687 DBUG_ON(!spin_is_locked(&ch->lock));
688 DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
689
690 ch->remote_msgqueue_pa = 0;
691 ch->func = NULL;
692 ch->key = NULL;
693 ch->msg_size = 0;
694 ch->local_nentries = 0;
695 ch->remote_nentries = 0;
696 ch->kthreads_assigned_limit = 0;
697 ch->kthreads_idle_limit = 0;
698
699 ch->local_GP->get = 0;
700 ch->local_GP->put = 0;
701 ch->remote_GP.get = 0;
702 ch->remote_GP.put = 0;
703 ch->w_local_GP.get = 0;
704 ch->w_local_GP.put = 0;
705 ch->w_remote_GP.get = 0;
706 ch->w_remote_GP.put = 0;
707 ch->next_msg_to_pull = 0;
708
709 if (ch->flags & XPC_C_SETUP) {
710 ch->flags &= ~XPC_C_SETUP;
711
712 dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
713 ch->flags, ch->partid, ch->number);
714
715 kfree(ch->local_msgqueue_base);
716 ch->local_msgqueue = NULL;
717 kfree(ch->remote_msgqueue_base);
718 ch->remote_msgqueue = NULL;
719 kfree(ch->notify_queue);
720 ch->notify_queue = NULL;
721 }
722}
723
724
725/*
726 * spin_lock_irqsave() is expected to be held on entry.
727 */
728static void
729xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
730{
731 struct xpc_partition *part = &xpc_partitions[ch->partid];
a607c389 732 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
733
734
735 DBUG_ON(!spin_is_locked(&ch->lock));
736
737 if (!(ch->flags & XPC_C_DISCONNECTING)) {
738 return;
739 }
740
741 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
742
743 /* make sure all activity has settled down first */
744
745 if (atomic_read(&ch->references) > 0) {
746 return;
747 }
748 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
749
750 if (part->act_state == XPC_P_DEACTIVATING) {
751 /* can't proceed until the other side disengages from us */
752 if (xpc_partition_engaged(1UL << ch->partid)) {
753 return;
754 }
89eb8eb9 755
a607c389 756 } else {
757
758 /* as long as the other side is up do the full protocol */
759
760 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
761 return;
762 }
763
764 if (!(ch->flags & XPC_C_CLOSEREPLY)) {
765 ch->flags |= XPC_C_CLOSEREPLY;
766 xpc_IPI_send_closereply(ch, irq_flags);
767 }
768
769 if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
770 return;
771 }
772 }
773
774 /* wake those waiting for notify completion */
775 if (atomic_read(&ch->n_to_notify) > 0) {
776 /* >>> we do callout while holding ch->lock */
777 xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put);
778 }
779
780 /* both sides are disconnected now */
781
782 /* it's now safe to free the channel's message queues */
783 xpc_free_msgqueues(ch);
784
785 /* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */
786 ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
787
788 atomic_dec(&part->nchannels_active);
789
a607c389 790 if (channel_was_connected) {
791 dev_info(xpc_chan, "channel %d to partition %d disconnected, "
792 "reason=%d\n", ch->number, ch->partid, ch->reason);
793 }
a607c389 794
795 if (ch->flags & XPC_C_WDISCONNECT) {
796 spin_unlock_irqrestore(&ch->lock, *irq_flags);
797 up(&ch->wdisconnect_sema);
798 spin_lock_irqsave(&ch->lock, *irq_flags);
799
800 } else if (ch->delayed_IPI_flags) {
801 if (part->act_state != XPC_P_DEACTIVATING) {
802 /* time to take action on any delayed IPI flags */
803 spin_lock(&part->IPI_lock);
804 XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
805 ch->delayed_IPI_flags);
806 spin_unlock(&part->IPI_lock);
807 }
808 ch->delayed_IPI_flags = 0;
a607c389 809 }
810}
811
812
813/*
814 * Process a change in the channel's remote connection state.
815 */
816static void
817xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
818 u8 IPI_flags)
819{
820 unsigned long irq_flags;
821 struct xpc_openclose_args *args =
822 &part->remote_openclose_args[ch_number];
823 struct xpc_channel *ch = &part->channels[ch_number];
824 enum xpc_retval reason;
825
826
827
828 spin_lock_irqsave(&ch->lock, irq_flags);
829
830again:
831
832 if ((ch->flags & XPC_C_DISCONNECTED) &&
833 (ch->flags & XPC_C_WDISCONNECT)) {
834 /*
 835		 * Delay processing of IPI flags until the thread waiting for the
 836		 * disconnect has had a chance to see that the channel is disconnected.
837 */
838 ch->delayed_IPI_flags |= IPI_flags;
839 spin_unlock_irqrestore(&ch->lock, irq_flags);
840 return;
841 }
842
843
844 if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
845
846 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
847 "from partid=%d, channel=%d\n", args->reason,
848 ch->partid, ch->number);
849
850 /*
851 * If RCLOSEREQUEST is set, we're probably waiting for
852 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
a607c389 853 * with this RCLOSEREQUEST in the IPI_flags.
854 */
855
856 if (ch->flags & XPC_C_RCLOSEREQUEST) {
857 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
858 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
859 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
860 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
861
862 DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
863 IPI_flags &= ~XPC_IPI_CLOSEREPLY;
864 ch->flags |= XPC_C_RCLOSEREPLY;
865
866 /* both sides have finished disconnecting */
867 xpc_process_disconnect(ch, &irq_flags);
868 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
869 goto again;
870 }
871
872 if (ch->flags & XPC_C_DISCONNECTED) {
89eb8eb9 873 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
874 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
875 ch_number) & XPC_IPI_OPENREQUEST)) {
876
877 DBUG_ON(ch->delayed_IPI_flags != 0);
878 spin_lock(&part->IPI_lock);
879 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
880 ch_number,
881 XPC_IPI_CLOSEREQUEST);
882 spin_unlock(&part->IPI_lock);
883 }
884 spin_unlock_irqrestore(&ch->lock, irq_flags);
885 return;
886 }
887
888 XPC_SET_REASON(ch, 0, 0);
889 ch->flags &= ~XPC_C_DISCONNECTED;
890
891 atomic_inc(&part->nchannels_active);
892 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
893 }
894
895 IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);
896
897 /*
898 * The meaningful CLOSEREQUEST connection state fields are:
899 * reason = reason connection is to be closed
900 */
901
902 ch->flags |= XPC_C_RCLOSEREQUEST;
903
904 if (!(ch->flags & XPC_C_DISCONNECTING)) {
905 reason = args->reason;
906 if (reason <= xpcSuccess || reason > xpcUnknownReason) {
907 reason = xpcUnknownReason;
908 } else if (reason == xpcUnregistering) {
909 reason = xpcOtherUnregistering;
910 }
911
912 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
913
914 DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY);
915 spin_unlock_irqrestore(&ch->lock, irq_flags);
916 return;
89eb8eb9 917 }
918
919 xpc_process_disconnect(ch, &irq_flags);
920 }
921
922
923 if (IPI_flags & XPC_IPI_CLOSEREPLY) {
924
925 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
926 " channel=%d\n", ch->partid, ch->number);
927
928 if (ch->flags & XPC_C_DISCONNECTED) {
929 DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
930 spin_unlock_irqrestore(&ch->lock, irq_flags);
931 return;
932 }
933
934 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
935
936 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
937 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
938 & XPC_IPI_CLOSEREQUEST)) {
939
940 DBUG_ON(ch->delayed_IPI_flags != 0);
941 spin_lock(&part->IPI_lock);
942 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
943 ch_number, XPC_IPI_CLOSEREPLY);
944 spin_unlock(&part->IPI_lock);
945 }
946 spin_unlock_irqrestore(&ch->lock, irq_flags);
947 return;
948 }
949
950 ch->flags |= XPC_C_RCLOSEREPLY;
951
952 if (ch->flags & XPC_C_CLOSEREPLY) {
953 /* both sides have finished disconnecting */
954 xpc_process_disconnect(ch, &irq_flags);
955 }
956 }
957
958
959 if (IPI_flags & XPC_IPI_OPENREQUEST) {
960
961 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
962 "local_nentries=%d) received from partid=%d, "
963 "channel=%d\n", args->msg_size, args->local_nentries,
964 ch->partid, ch->number);
965
966 if (part->act_state == XPC_P_DEACTIVATING ||
967 (ch->flags & XPC_C_ROPENREQUEST)) {
968 spin_unlock_irqrestore(&ch->lock, irq_flags);
969 return;
970 }
971
972 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
973 ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST;
974 spin_unlock_irqrestore(&ch->lock, irq_flags);
975 return;
976 }
977 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
978 XPC_C_OPENREQUEST)));
979 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
980 XPC_C_OPENREPLY | XPC_C_CONNECTED));
981
982 /*
983 * The meaningful OPENREQUEST connection state fields are:
984 * msg_size = size of channel's messages in bytes
985 * local_nentries = remote partition's local_nentries
986 */
987 if (args->msg_size == 0 || args->local_nentries == 0) {
988 /* assume OPENREQUEST was delayed by mistake */
989 spin_unlock_irqrestore(&ch->lock, irq_flags);
990 return;
991 }
992
993 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
994 ch->remote_nentries = args->local_nentries;
995
996
997 if (ch->flags & XPC_C_OPENREQUEST) {
998 if (args->msg_size != ch->msg_size) {
999 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1000 &irq_flags);
1001 spin_unlock_irqrestore(&ch->lock, irq_flags);
1002 return;
1003 }
1004 } else {
1005 ch->msg_size = args->msg_size;
1006
1007 XPC_SET_REASON(ch, 0, 0);
1008 ch->flags &= ~XPC_C_DISCONNECTED;
1009
1010 atomic_inc(&part->nchannels_active);
1011 }
1012
1013 xpc_process_connect(ch, &irq_flags);
1014 }
1015
1016
1017 if (IPI_flags & XPC_IPI_OPENREPLY) {
1018
1019 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
1020 "local_nentries=%d, remote_nentries=%d) received from "
1021 "partid=%d, channel=%d\n", args->local_msgqueue_pa,
1022 args->local_nentries, args->remote_nentries,
1023 ch->partid, ch->number);
1024
1025 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
1026 spin_unlock_irqrestore(&ch->lock, irq_flags);
1027 return;
1028 }
1029 if (!(ch->flags & XPC_C_OPENREQUEST)) {
1030 XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
1031 &irq_flags);
1032 spin_unlock_irqrestore(&ch->lock, irq_flags);
1033 return;
1034 }
1035
1036 DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
1037 DBUG_ON(ch->flags & XPC_C_CONNECTED);
1038
1039 /*
1040 * The meaningful OPENREPLY connection state fields are:
1041 * local_msgqueue_pa = physical address of remote
1042 * partition's local_msgqueue
1043 * local_nentries = remote partition's local_nentries
1044 * remote_nentries = remote partition's remote_nentries
1045 */
1046 DBUG_ON(args->local_msgqueue_pa == 0);
1047 DBUG_ON(args->local_nentries == 0);
1048 DBUG_ON(args->remote_nentries == 0);
1049
1050 ch->flags |= XPC_C_ROPENREPLY;
1051 ch->remote_msgqueue_pa = args->local_msgqueue_pa;
1052
1053 if (args->local_nentries < ch->remote_nentries) {
1054 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
1055 "remote_nentries=%d, old remote_nentries=%d, "
1056 "partid=%d, channel=%d\n",
1057 args->local_nentries, ch->remote_nentries,
1058 ch->partid, ch->number);
1059
1060 ch->remote_nentries = args->local_nentries;
1061 }
1062 if (args->remote_nentries < ch->local_nentries) {
1063 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
1064 "local_nentries=%d, old local_nentries=%d, "
1065 "partid=%d, channel=%d\n",
1066 args->remote_nentries, ch->local_nentries,
1067 ch->partid, ch->number);
1068
1069 ch->local_nentries = args->remote_nentries;
1070 }
1071
1072 xpc_process_connect(ch, &irq_flags);
1073 }
1074
1075 spin_unlock_irqrestore(&ch->lock, irq_flags);
1076}
1077
1078
1079/*
1080 * Attempt to establish a channel connection to a remote partition.
1081 */
1082static enum xpc_retval
1083xpc_connect_channel(struct xpc_channel *ch)
1084{
1085 unsigned long irq_flags;
1086 struct xpc_registration *registration = &xpc_registrations[ch->number];
1087
1088
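	/*
	 * Don't block on the registration mutex; returning xpcRetry simply
	 * leaves the channel unconnected so that a later pass of
	 * xpc_process_channel_activity() will attempt the connect again.
	 */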
1089 if (down_trylock(&registration->sema) != 0) {
1090 return xpcRetry;
1091 }
1092
1093 if (!XPC_CHANNEL_REGISTERED(ch->number)) {
1094 up(&registration->sema);
1095 return xpcUnregistered;
1096 }
1097
1098 spin_lock_irqsave(&ch->lock, irq_flags);
1099
1100 DBUG_ON(ch->flags & XPC_C_CONNECTED);
1101 DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
1102
1103 if (ch->flags & XPC_C_DISCONNECTING) {
1104 spin_unlock_irqrestore(&ch->lock, irq_flags);
1105 up(&registration->sema);
1106 return ch->reason;
1107 }
1108
1109
1110 /* add info from the channel connect registration to the channel */
1111
1112 ch->kthreads_assigned_limit = registration->assigned_limit;
1113 ch->kthreads_idle_limit = registration->idle_limit;
1114 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
1115 DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
1116 DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
1117
1118 ch->func = registration->func;
1119 DBUG_ON(registration->func == NULL);
1120 ch->key = registration->key;
1121
1122 ch->local_nentries = registration->nentries;
1123
1124 if (ch->flags & XPC_C_ROPENREQUEST) {
1125 if (registration->msg_size != ch->msg_size) {
1126 /* the local and remote sides aren't the same */
1127
1128 /*
1129 * Because XPC_DISCONNECT_CHANNEL() can block we're
1130 * forced to up the registration sema before we unlock
1131 * the channel lock. But that's okay here because we're
1132 * done with the part that required the registration
1133 * sema. XPC_DISCONNECT_CHANNEL() requires that the
1134 * channel lock be locked and will unlock and relock
1135 * the channel lock as needed.
1136 */
1137 up(&registration->sema);
1138 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1139 &irq_flags);
1140 spin_unlock_irqrestore(&ch->lock, irq_flags);
1141 return xpcUnequalMsgSizes;
1142 }
1143 } else {
1144 ch->msg_size = registration->msg_size;
1145
1146 XPC_SET_REASON(ch, 0, 0);
1147 ch->flags &= ~XPC_C_DISCONNECTED;
1148
1149 atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
1150 }
1151
1152 up(&registration->sema);
1153
1154
1155 /* initiate the connection */
1156
1157 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
1158 xpc_IPI_send_openrequest(ch, &irq_flags);
1159
1160 xpc_process_connect(ch, &irq_flags);
1161
1162 spin_unlock_irqrestore(&ch->lock, irq_flags);
1163
1164 return xpcSuccess;
1165}
1166
1167
1168/*
1169 * Clear some of the msg flags in the local message queue.
1170 */
1171static inline void
1172xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
1173{
1174 struct xpc_msg *msg;
1175 s64 get;
1176
1177
1178 get = ch->w_remote_GP.get;
1179 do {
1180 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
1181 (get % ch->local_nentries) * ch->msg_size);
1182 msg->flags = 0;
1183 } while (++get < (volatile s64) ch->remote_GP.get);
1184}
1185
1186
1187/*
1188 * Clear some of the msg flags in the remote message queue.
1189 */
1190static inline void
1191xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
1192{
1193 struct xpc_msg *msg;
1194 s64 put;
1195
1196
1197 put = ch->w_remote_GP.put;
1198 do {
1199 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
1200 (put % ch->remote_nentries) * ch->msg_size);
1201 msg->flags = 0;
1202 } while (++put < (volatile s64) ch->remote_GP.put);
1203}
1204
1205
1206static void
1207xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1208{
1209 struct xpc_channel *ch = &part->channels[ch_number];
1210 int nmsgs_sent;
1211
1212
1213 ch->remote_GP = part->remote_GPs[ch_number];
1214
1215
1216 /* See what, if anything, has changed for each connected channel */
1217
1218 xpc_msgqueue_ref(ch);
1219
1220 if (ch->w_remote_GP.get == ch->remote_GP.get &&
1221 ch->w_remote_GP.put == ch->remote_GP.put) {
1222 /* nothing changed since GPs were last pulled */
1223 xpc_msgqueue_deref(ch);
1224 return;
1225 }
1226
1227 if (!(ch->flags & XPC_C_CONNECTED)){
1228 xpc_msgqueue_deref(ch);
1229 return;
1230 }
1231
1232
1233 /*
1234 * First check to see if messages recently sent by us have been
1235 * received by the other side. (The remote GET value will have
1236 * changed since we last looked at it.)
1237 */
1238
1239 if (ch->w_remote_GP.get != ch->remote_GP.get) {
1240
1241 /*
1242 * We need to notify any senders that want to be notified
1243 * that their sent messages have been received by their
1244 * intended recipients. We need to do this before updating
1245 * w_remote_GP.get so that we don't allocate the same message
1246 * queue entries prematurely (see xpc_allocate_msg()).
1247 */
1248 if (atomic_read(&ch->n_to_notify) > 0) {
1249 /*
1250 * Notify senders that messages sent have been
1251 * received and delivered by the other side.
1252 */
1253 xpc_notify_senders(ch, xpcMsgDelivered,
1254 ch->remote_GP.get);
1255 }
1256
1257 /*
1258 * Clear msg->flags in previously sent messages, so that
1259 * they're ready for xpc_allocate_msg().
1260 */
1261 xpc_clear_local_msgqueue_flags(ch);
1262
821fe947 1263 ch->w_remote_GP.get = ch->remote_GP.get;
1264
1265 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
1266 "channel=%d\n", ch->w_remote_GP.get, ch->partid,
1267 ch->number);
1268
1269 /*
1270 * If anyone was waiting for message queue entries to become
1271 * available, wake them up.
1272 */
1273 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
1274 wake_up(&ch->msg_allocate_wq);
1275 }
1276 }
1277
1278
1279 /*
1280 * Now check for newly sent messages by the other side. (The remote
1281 * PUT value will have changed since we last looked at it.)
1282 */
1283
1284 if (ch->w_remote_GP.put != ch->remote_GP.put) {
1285 /*
1286 * Clear msg->flags in previously received messages, so that
1287 * they're ready for xpc_get_deliverable_msg().
1288 */
1289 xpc_clear_remote_msgqueue_flags(ch);
1290
821fe947 1291 ch->w_remote_GP.put = ch->remote_GP.put;
1292
1293 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
1294 "channel=%d\n", ch->w_remote_GP.put, ch->partid,
1295 ch->number);
1296
1297 nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
1298 if (nmsgs_sent > 0) {
1299 dev_dbg(xpc_chan, "msgs waiting to be copied and "
1300 "delivered=%d, partid=%d, channel=%d\n",
1301 nmsgs_sent, ch->partid, ch->number);
1302
1303 if (ch->flags & XPC_C_CONNECTCALLOUT) {
1304 xpc_activate_kthreads(ch, nmsgs_sent);
1305 }
1306 }
1307 }
1308
1309 xpc_msgqueue_deref(ch);
1310}
1311
1312
1313void
1314xpc_process_channel_activity(struct xpc_partition *part)
1315{
1316 unsigned long irq_flags;
1317 u64 IPI_amo, IPI_flags;
1318 struct xpc_channel *ch;
1319 int ch_number;
a607c389 1320 u32 ch_flags;
1321
1322
1323 IPI_amo = xpc_get_IPI_flags(part);
1324
1325 /*
1326 * Initiate channel connections for registered channels.
1327 *
1328 * For each connected channel that has pending messages activate idle
1329 * kthreads and/or create new kthreads as needed.
1330 */
1331
1332 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1333 ch = &part->channels[ch_number];
1334
1335
1336 /*
1337 * Process any open or close related IPI flags, and then deal
1338 * with connecting or disconnecting the channel as required.
1339 */
1340
1341 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
1342
1343 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
1344 xpc_process_openclose_IPI(part, ch_number, IPI_flags);
1345 }
1346
a607c389 1347 ch_flags = ch->flags; /* need an atomic snapshot of flags */
89eb8eb9 1348
a607c389 1349 if (ch_flags & XPC_C_DISCONNECTING) {
1350 spin_lock_irqsave(&ch->lock, irq_flags);
1351 xpc_process_disconnect(ch, &irq_flags);
1352 spin_unlock_irqrestore(&ch->lock, irq_flags);
1353 continue;
1354 }
1355
1356 if (part->act_state == XPC_P_DEACTIVATING) {
1357 continue;
1358 }
1359
1360 if (!(ch_flags & XPC_C_CONNECTED)) {
1361 if (!(ch_flags & XPC_C_OPENREQUEST)) {
1362 DBUG_ON(ch_flags & XPC_C_SETUP);
1363 (void) xpc_connect_channel(ch);
1364 } else {
1365 spin_lock_irqsave(&ch->lock, irq_flags);
1366 xpc_process_connect(ch, &irq_flags);
1367 spin_unlock_irqrestore(&ch->lock, irq_flags);
1368 }
1369 continue;
1370 }
1371
1372
1373 /*
1374 * Process any message related IPI flags, this may involve the
1375 * activation of kthreads to deliver any pending messages sent
1376 * from the other partition.
1377 */
1378
1379 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
1380 xpc_process_msg_IPI(part, ch_number);
1381 }
1382 }
1383}
1384
1385
1386/*
1387 * XPC's heartbeat code calls this function to inform XPC that a partition is
1388 * going down. XPC responds by tearing down the XPartition Communication
1389 * infrastructure used for the just downed partition.
1390 *
1391 * XPC's heartbeat code will never call this function and xpc_partition_up()
1392 * at the same time. Nor will it ever make multiple calls to either function
1393 * at the same time.
1394 */
1395void
a607c389 1396xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
1397{
1398 unsigned long irq_flags;
1399 int ch_number;
1400 struct xpc_channel *ch;
1401
1402
1403 dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
1404 XPC_PARTID(part), reason);
1405
1406 if (!xpc_part_ref(part)) {
1407 /* infrastructure for this partition isn't currently set up */
1408 return;
1409 }
1410
1411
a607c389 1412 /* disconnect channels associated with the partition going down */
1413
1414 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1415 ch = &part->channels[ch_number];
1416
1417 xpc_msgqueue_ref(ch);
1418 spin_lock_irqsave(&ch->lock, irq_flags);
1419
1420 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
1421
1422 spin_unlock_irqrestore(&ch->lock, irq_flags);
1423 xpc_msgqueue_deref(ch);
1424 }
1425
1426 xpc_wakeup_channel_mgr(part);
1427
1428 xpc_part_deref(part);
1429}
1430
1431
1432/*
1433 * Teardown the infrastructure necessary to support XPartition Communication
1434 * between the specified remote partition and the local one.
1435 */
1436void
1437xpc_teardown_infrastructure(struct xpc_partition *part)
1438{
1439 partid_t partid = XPC_PARTID(part);
1440
1441
1442 /*
1443 * We start off by making this partition inaccessible to local
1444 * processes by marking it as no longer setup. Then we make it
1445 * inaccessible to remote processes by clearing the XPC per partition
1446 * specific variable's magic # (which indicates that these variables
1447 * are no longer valid) and by ignoring all XPC notify IPIs sent to
1448 * this partition.
1449 */
1450
a607c389 1451 DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
1452 DBUG_ON(atomic_read(&part->nchannels_active) != 0);
1453 DBUG_ON(part->setup_state != XPC_P_SETUP);
1454 part->setup_state = XPC_P_WTEARDOWN;
1455
1456 xpc_vars_part[partid].magic = 0;
1457
1458
1459 free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
1460
1461
1462 /*
 1463	 * Before proceeding with the teardown we have to wait until all
1464 * existing references cease.
1465 */
1466 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
1467
1468
1469 /* now we can begin tearing down the infrastructure */
1470
1471 part->setup_state = XPC_P_TORNDOWN;
1472
1473 /* in case we've still got outstanding timers registered... */
1474 del_timer_sync(&part->dropped_IPI_timer);
1475
1476 kfree(part->remote_openclose_args_base);
1477 part->remote_openclose_args = NULL;
1478 kfree(part->local_openclose_args_base);
1479 part->local_openclose_args = NULL;
1480 kfree(part->remote_GPs_base);
1481 part->remote_GPs = NULL;
1482 kfree(part->local_GPs_base);
1483 part->local_GPs = NULL;
1484 kfree(part->channels);
1485 part->channels = NULL;
1486 part->local_IPI_amo_va = NULL;
1487}
1488
1489
1490/*
1491 * Called by XP at the time of channel connection registration to cause
1492 * XPC to establish connections to all currently active partitions.
1493 */
1494void
1495xpc_initiate_connect(int ch_number)
1496{
1497 partid_t partid;
1498 struct xpc_partition *part;
1499 struct xpc_channel *ch;
1500
1501
1502 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1503
1504 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1505 part = &xpc_partitions[partid];
1506
1507 if (xpc_part_ref(part)) {
1508 ch = &part->channels[ch_number];
1509
1510 /*
1511 * Initiate the establishment of a connection on the
1512 * newly registered channel to the remote partition.
1513 */
1514 xpc_wakeup_channel_mgr(part);
1515 xpc_part_deref(part);
1516 }
1517 }
1518}
1519
1520
1521void
1522xpc_connected_callout(struct xpc_channel *ch)
1523{
1524 /* let the registerer know that a connection has been established */
1525
1526 if (ch->func != NULL) {
1527 dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
1528 "partid=%d, channel=%d\n", ch->partid, ch->number);
1529
1530 ch->func(xpcConnected, ch->partid, ch->number,
1531 (void *) (u64) ch->local_nentries, ch->key);
1532
1533 dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
1534 "partid=%d, channel=%d\n", ch->partid, ch->number);
1535 }
1536}
1537
1538
1539/*
1540 * Called by XP at the time of channel connection unregistration to cause
1541 * XPC to teardown all current connections for the specified channel.
1542 *
1543 * Before returning xpc_initiate_disconnect() will wait until all connections
 1544 * on the specified channel have been closed/torn down. So the caller can be
1545 * assured that they will not be receiving any more callouts from XPC to the
1546 * function they registered via xpc_connect().
1547 *
1548 * Arguments:
1549 *
1550 * ch_number - channel # to unregister.
1551 */
1552void
1553xpc_initiate_disconnect(int ch_number)
1554{
1555 unsigned long irq_flags;
1556 partid_t partid;
1557 struct xpc_partition *part;
1558 struct xpc_channel *ch;
1559
1560
1561 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1562
1563 /* initiate the channel disconnect for every active partition */
1564 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1565 part = &xpc_partitions[partid];
1566
1567 if (xpc_part_ref(part)) {
1568 ch = &part->channels[ch_number];
1569 xpc_msgqueue_ref(ch);
1570
1571 spin_lock_irqsave(&ch->lock, irq_flags);
1572
1573 if (!(ch->flags & XPC_C_DISCONNECTED)) {
1574 ch->flags |= XPC_C_WDISCONNECT;
1575
1576 XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
89eb8eb9 1577 &irq_flags);
a607c389 1578 }
1579
1580 spin_unlock_irqrestore(&ch->lock, irq_flags);
1581
1582 xpc_msgqueue_deref(ch);
1583 xpc_part_deref(part);
1584 }
1585 }
1586
1587 xpc_disconnect_wait(ch_number);
1588}
1589
1590
1591/*
1592 * To disconnect a channel, and reflect it back to all who may be waiting.
1593 *
1594 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
1595 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
1596 * xpc_disconnect_wait().
1597 *
1598 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
1599 */
1600void
1601xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1602 enum xpc_retval reason, unsigned long *irq_flags)
1603{
a607c389 1604 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
1605
1606
1607 DBUG_ON(!spin_is_locked(&ch->lock));
1608
1609 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
1610 return;
1611 }
1612 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
1613
1614 dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
1615 reason, line, ch->partid, ch->number);
1616
1617 XPC_SET_REASON(ch, reason, line);
1618
a607c389 1619 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
1620 /* some of these may not have been set */
1621 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
1622 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
1623 XPC_C_CONNECTING | XPC_C_CONNECTED);
1624
1625 xpc_IPI_send_closerequest(ch, irq_flags);
1626
a607c389 1627 if (channel_was_connected) {
1628 ch->flags |= XPC_C_WASCONNECTED;
1629 }
1630
1631 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1632
1633 /* wake all idle kthreads so they can exit */
89eb8eb9 1634 if (atomic_read(&ch->kthreads_idle) > 0) {
1635 wake_up_all(&ch->idle_wq);
1636 }
1637
89eb8eb9 1638 /* wake those waiting to allocate an entry from the local msg queue */
1639 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
1640 wake_up(&ch->msg_allocate_wq);
1641 }
1642
1643 spin_lock_irqsave(&ch->lock, *irq_flags);
1644}
1645
1646
1647void
a607c389 1648xpc_disconnecting_callout(struct xpc_channel *ch)
1649{
1650 /*
a607c389 1651 * Let the channel's registerer know that the channel is being
89eb8eb9 1652 * disconnected. We don't want to do this if the registerer was never
a607c389 1653 * informed of a connection being made.
1654 */
1655
1656 if (ch->func != NULL) {
1657 dev_dbg(xpc_chan, "ch->func() called, reason=xpcDisconnecting,"
1658 " partid=%d, channel=%d\n", ch->partid, ch->number);
89eb8eb9 1659
1660 ch->func(xpcDisconnecting, ch->partid, ch->number, NULL,
1661 ch->key);
89eb8eb9 1662
1663 dev_dbg(xpc_chan, "ch->func() returned, reason="
1664 "xpcDisconnecting, partid=%d, channel=%d\n",
1665 ch->partid, ch->number);
1666 }
1667}
1668
1669
1670/*
1671 * Wait for a message entry to become available for the specified channel,
1672 * but don't wait any longer than 1 jiffy.
1673 */
1674static enum xpc_retval
1675xpc_allocate_msg_wait(struct xpc_channel *ch)
1676{
1677 enum xpc_retval ret;
1678
1679
1680 if (ch->flags & XPC_C_DISCONNECTING) {
1681 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
1682 return ch->reason;
1683 }
1684
1685 atomic_inc(&ch->n_on_msg_allocate_wq);
1686 ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
1687 atomic_dec(&ch->n_on_msg_allocate_wq);
1688
1689 if (ch->flags & XPC_C_DISCONNECTING) {
1690 ret = ch->reason;
1691 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
1692 } else if (ret == 0) {
1693 ret = xpcTimeout;
1694 } else {
1695 ret = xpcInterrupted;
1696 }
1697
1698 return ret;
1699}
1700
1701
1702/*
1703 * Allocate an entry for a message from the message queue associated with the
1704 * specified channel.
1705 */
1706static enum xpc_retval
1707xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1708 struct xpc_msg **address_of_msg)
1709{
1710 struct xpc_msg *msg;
1711 enum xpc_retval ret;
1712 s64 put;
1713
1714
1715 /* this reference will be dropped in xpc_send_msg() */
1716 xpc_msgqueue_ref(ch);
1717
1718 if (ch->flags & XPC_C_DISCONNECTING) {
1719 xpc_msgqueue_deref(ch);
1720 return ch->reason;
1721 }
1722 if (!(ch->flags & XPC_C_CONNECTED)) {
1723 xpc_msgqueue_deref(ch);
1724 return xpcNotConnected;
1725 }
1726
1727
1728 /*
1729 * Get the next available message entry from the local message queue.
1730 * If none are available, we'll make sure that we grab the latest
1731 * GP values.
1732 */
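	/*
	 * Note that w_local_GP.put is only a local "working" reservation
	 * counter; the put value visible to the remote side, local_GP->put,
	 * is advanced later by xpc_send_msgs() once the message has been
	 * marked XPC_M_READY.
	 */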
1733 ret = xpcTimeout;
1734
1735 while (1) {
1736
1737 put = (volatile s64) ch->w_local_GP.put;
1738 if (put - (volatile s64) ch->w_remote_GP.get <
1739 ch->local_nentries) {
1740
1741 /* There are available message entries. We need to try
1742 * to secure one for ourselves. We'll do this by trying
1743 * to increment w_local_GP.put as long as someone else
1744 * doesn't beat us to it. If they do, we'll have to
1745 * try again.
1746 */
1747 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
1748 put) {
1749 /* we got the entry referenced by put */
1750 break;
1751 }
1752 continue; /* try again */
1753 }
1754
1755
1756 /*
1757 * There aren't any available msg entries at this time.
1758 *
1759 * In waiting for a message entry to become available,
1760 * we set a timeout in case the other side is not
1761 * sending completion IPIs. This lets us fake an IPI
1762 * that will cause the IPI handler to fetch the latest
1763 * GP values as if an IPI was sent by the other side.
1764 */
1765 if (ret == xpcTimeout) {
1766 xpc_IPI_send_local_msgrequest(ch);
1767 }
1768
1769 if (flags & XPC_NOWAIT) {
1770 xpc_msgqueue_deref(ch);
1771 return xpcNoWait;
1772 }
1773
1774 ret = xpc_allocate_msg_wait(ch);
1775 if (ret != xpcInterrupted && ret != xpcTimeout) {
1776 xpc_msgqueue_deref(ch);
1777 return ret;
1778 }
1779 }
1780
1781
1782 /* get the message's address and initialize it */
1783 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
1784 (put % ch->local_nentries) * ch->msg_size);
1785
1786
1787 DBUG_ON(msg->flags != 0);
1788 msg->number = put;
1789
1790 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
1791 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
1792 (void *) msg, msg->number, ch->partid, ch->number);
1793
1794 *address_of_msg = msg;
1795
1796 return xpcSuccess;
1797}
1798
1799
1800/*
1801 * Allocate an entry for a message from the message queue associated with the
1802 * specified channel. NOTE that this routine can sleep waiting for a message
1803 * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
1804 *
1805 * Arguments:
1806 *
1807 * partid - ID of partition to which the channel is connected.
1808 * ch_number - channel #.
1809 * flags - see xpc.h for valid flags.
1810 * payload - address of the allocated payload area pointer (filled in on
1811 * return) in which the user-defined message is constructed.
1812 */
1813enum xpc_retval
1814xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1815{
1816 struct xpc_partition *part = &xpc_partitions[partid];
1817 enum xpc_retval ret = xpcUnknownReason;
1818 struct xpc_msg *msg;
1819
1820
1821 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
1822 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1823
1824 *payload = NULL;
1825
1826 if (xpc_part_ref(part)) {
1827 ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
1828 xpc_part_deref(part);
1829
1830 if (msg != NULL) {
1831 *payload = &msg->payload;
1832 }
1833 }
1834
1835 return ret;
1836}
1837
1838
1839/*
1840 * Now we actually send the messages that are ready to be sent by advancing
1841 * the local message queue's Put value and then send an IPI to the recipient
1842 * partition.
1843 */
1844static void
1845xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1846{
1847 struct xpc_msg *msg;
1848 s64 put = initial_put + 1;
1849 int send_IPI = 0;
1850
1851
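	/*
	 * Advance the remotely visible local_GP->put past every consecutive
	 * entry already marked XPC_M_READY. If another CPU advances
	 * local_GP->put first, the cmpxchg_rel() below fails and we leave the
	 * remaining work to that CPU.
	 */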
1852 while (1) {
1853
1854 while (1) {
1855 if (put == (volatile s64) ch->w_local_GP.put) {
1856 break;
1857 }
1858
1859 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
1860 (put % ch->local_nentries) * ch->msg_size);
1861
1862 if (!(msg->flags & XPC_M_READY)) {
1863 break;
1864 }
1865
1866 put++;
1867 }
1868
1869 if (put == initial_put) {
1870 /* nothing's changed */
1871 break;
1872 }
1873
1874 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
1875 initial_put) {
1876 /* someone else beat us to it */
1877 DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
1878 break;
1879 }
1880
1881 /* we just set the new value of local_GP->put */
1882
1883 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
1884 "channel=%d\n", put, ch->partid, ch->number);
1885
1886 send_IPI = 1;
1887
1888 /*
1889 * We need to ensure that the message referenced by
1890 * local_GP->put is not XPC_M_READY or that local_GP->put
1891 * equals w_local_GP.put, so we'll go have a look.
1892 */
1893 initial_put = put;
1894 }
1895
1896 if (send_IPI) {
1897 xpc_IPI_send_msgrequest(ch);
1898 }
1899}
1900
1901
1902/*
1903 * Common code that does the actual sending of the message by advancing the
1904 * local message queue's Put value and sends an IPI to the partition the
1905 * message is being sent to.
1906 */
1907static enum xpc_retval
1908xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1909 xpc_notify_func func, void *key)
1910{
1911 enum xpc_retval ret = xpcSuccess;
a607c389 1912 struct xpc_notify *notify = notify;
1913 s64 put, msg_number = msg->number;
1914
1915
1916 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
1917 DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
1918 msg_number % ch->local_nentries);
1919 DBUG_ON(msg->flags & XPC_M_READY);
1920
1921 if (ch->flags & XPC_C_DISCONNECTING) {
1922 /* drop the reference grabbed in xpc_allocate_msg() */
1923 xpc_msgqueue_deref(ch);
1924 return ch->reason;
1925 }
1926
1927 if (notify_type != 0) {
1928 /*
1929 * Tell the remote side to send an ACK interrupt when the
1930 * message has been delivered.
1931 */
1932 msg->flags |= XPC_M_INTERRUPT;
1933
1934 atomic_inc(&ch->n_to_notify);
1935
1936 notify = &ch->notify_queue[msg_number % ch->local_nentries];
1937 notify->func = func;
1938 notify->key = key;
821fe947 1939 notify->type = notify_type;
1940
1941 /* >>> is a mb() needed here? */
1942
1943 if (ch->flags & XPC_C_DISCONNECTING) {
1944 /*
1945 * An error occurred between our last error check and
1946 * this one. We will try to clear the type field from
1947 * the notify entry. If we succeed then
1948 * xpc_disconnect_channel() didn't already process
1949 * the notify entry.
1950 */
1951 if (cmpxchg(&notify->type, notify_type, 0) ==
1952 notify_type) {
1953 atomic_dec(&ch->n_to_notify);
1954 ret = ch->reason;
1955 }
1956
1957 /* drop the reference grabbed in xpc_allocate_msg() */
1958 xpc_msgqueue_deref(ch);
1959 return ret;
1960 }
1961 }
1962
1963 msg->flags |= XPC_M_READY;
1964
1965 /*
1966 * The preceding store of msg->flags must occur before the following
1967 * load of ch->local_GP->put.
1968 */
1969 mb();
1970
1971 /* see if the message is next in line to be sent, if so send it */
1972
1973 put = ch->local_GP->put;
1974 if (put == msg_number) {
1975 xpc_send_msgs(ch, put);
1976 }
1977
1978 /* drop the reference grabbed in xpc_allocate_msg() */
1979 xpc_msgqueue_deref(ch);
1980 return ret;
1981}
1982
1983
1984/*
1985 * Send a message previously allocated using xpc_initiate_allocate() on the
1986 * specified channel connected to the specified partition.
1987 *
1988 * This routine will not wait for the message to be received, nor will
1989 * notification be given when it does happen. Once this routine has returned
1990 * the message entry allocated via xpc_initiate_allocate() is no longer
1991 * accessible to the caller.
1992 *
1993 * This routine, although called by users, does not call xpc_part_ref() to
1994 * ensure that the partition infrastructure is in place. It relies on the
1995 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
1996 *
1997 * Arguments:
1998 *
1999 * partid - ID of partition to which the channel is connected.
2000 * ch_number - channel # to send message on.
2001 * payload - pointer to the payload area allocated via
2002 * xpc_initiate_allocate().
2003 */
2004enum xpc_retval
2005xpc_initiate_send(partid_t partid, int ch_number, void *payload)
2006{
2007 struct xpc_partition *part = &xpc_partitions[partid];
2008 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2009 enum xpc_retval ret;
2010
2011
2012 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2013 partid, ch_number);
2014
2015 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2016 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2017 DBUG_ON(msg == NULL);
2018
2019 ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
2020
2021 return ret;
2022}
2023
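/*
 * Illustrative sketch (not part of XPC, guarded out of the build): allocating
 * a payload, filling it in and sending it without waiting for delivery
 * notification.  It reuses struct example_payload from the allocation sketch
 * above; the helper name, the opcode/data fields and the XPC_NOWAIT flag are
 * assumptions made for this example.
 */
#if 0
static enum xpc_retval
example_send(partid_t partid, int ch_number, u64 opcode, u64 data)
{
	void *payload;
	struct example_payload *p;
	enum xpc_retval ret;


	ret = xpc_initiate_allocate(partid, ch_number, XPC_NOWAIT, &payload);
	if (ret != xpcSuccess) {
		return ret;
	}

	p = (struct example_payload *) payload;
	p->opcode = opcode;
	p->data = data;

	/* after this call the payload area may no longer be touched */
	return xpc_initiate_send(partid, ch_number, payload);
}
#endif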
2024
2025/*
2026 * Send a message previously allocated using xpc_initiate_allocate on the
2027 * specified channel connected to the specified partition.
2028 *
2029 * This routine will not wait for the message to be sent. Once this routine
2030 * has returned the message entry allocated via xpc_initiate_allocate() is no
2031 * longer accessible to the caller.
2032 *
2033 * Once the remote end of the channel has received the message, the function
2034 * passed as an argument to xpc_initiate_send_notify() will be called. This
2035 * allows the sender to free up or re-use any buffers referenced by the
2036 * message, but does NOT mean the message has been processed at the remote
2037 * end by a receiver.
2038 *
2039 * If this routine returns an error, the caller's function will NOT be called.
2040 *
2041 * This routine, although called by users, does not call xpc_part_ref() to
2042 * ensure that the partition infrastructure is in place. It relies on the
2043 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
2044 *
2045 * Arguments:
2046 *
2047 * partid - ID of partition to which the channel is connected.
2048 * ch_number - channel # to send message on.
2049 * payload - pointer to the payload area allocated via
2050 * xpc_initiate_allocate().
2051 * func - function to call with asynchronous notification of message
2052 * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
2053 * key - user-defined key to be passed to the function when it's called.
2054 */
2055enum xpc_retval
2056xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
2057 xpc_notify_func func, void *key)
2058{
2059 struct xpc_partition *part = &xpc_partitions[partid];
2060 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2061 enum xpc_retval ret;
2062
2063
2064 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2065 partid, ch_number);
2066
2067 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2068 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2069 DBUG_ON(msg == NULL);
2070 DBUG_ON(func == NULL);
2071
2072 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
2073 func, key);
2074 return ret;
2075}
2076
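/*
 * Illustrative sketch (not part of XPC, guarded out of the build): sending
 * with asynchronous delivery notification.  The callback shape follows the
 * xpc_notify_func typedef assumed to be declared in the XP interface header;
 * it must not block.  The helper names and the use of an atomic_t as the key
 * are assumptions made for this example.
 */
#if 0
static void
example_notify(enum xpc_retval reason, partid_t partid, int ch_number,
				void *key)
{
	/* must not block; just note that the message buffer is free again */
	atomic_dec((atomic_t *) key);
}

static enum xpc_retval
example_send_notify(partid_t partid, int ch_number, void *payload,
				atomic_t *outstanding)
{
	enum xpc_retval ret;


	atomic_inc(outstanding);
	ret = xpc_initiate_send_notify(partid, ch_number, payload,
					example_notify, (void *) outstanding);
	if (ret != xpcSuccess) {
		/* on error the notify function will not be called */
		atomic_dec(outstanding);
	}
	return ret;
}
#endif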
2077
2078static struct xpc_msg *
2079xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2080{
2081 struct xpc_partition *part = &xpc_partitions[ch->partid];
2082 struct xpc_msg *remote_msg, *msg;
2083 u32 msg_index, nmsgs;
2084 u64 msg_offset;
2085 enum xpc_retval ret;
2086
2087
2088 if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
2089 /* we were interrupted by a signal */
2090 return NULL;
2091 }
2092
2093 while (get >= ch->next_msg_to_pull) {
2094
2095 /* pull as many messages as are ready and able to be pulled */
2096
2097 msg_index = ch->next_msg_to_pull % ch->remote_nentries;
2098
2099 DBUG_ON(ch->next_msg_to_pull >=
2100 (volatile s64) ch->w_remote_GP.put);
2101 nmsgs = (volatile s64) ch->w_remote_GP.put -
2102 ch->next_msg_to_pull;
2103 if (msg_index + nmsgs > ch->remote_nentries) {
2104 /* ignore the ones that wrap the msg queue for now */
2105 nmsgs = ch->remote_nentries - msg_index;
2106 }
2107
2108 msg_offset = msg_index * ch->msg_size;
2109 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
2110 msg_offset);
2111 remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
2112 msg_offset);
2113
2114 if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
2115 nmsgs * ch->msg_size)) != xpcSuccess) {
2116
2117 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
2118 " msg %ld from partition %d, channel=%d, "
2119 "ret=%d\n", nmsgs, ch->next_msg_to_pull,
2120 ch->partid, ch->number, ret);
2121
2122 XPC_DEACTIVATE_PARTITION(part, ret);
2123
2124 up(&ch->msg_to_pull_sema);
2125 return NULL;
2126 }
2127
2128 mb(); /* >>> this may not be needed, we're not sure */
2129
2130 ch->next_msg_to_pull += nmsgs;
2131 }
2132
2133 up(&ch->msg_to_pull_sema);
2134
2135 /* return the message we were looking for */
2136 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
2137 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
2138
2139 return msg;
2140}
2141
2142
2143/*
2144 * Get a message to be delivered.
2145 */
2146static struct xpc_msg *
2147xpc_get_deliverable_msg(struct xpc_channel *ch)
2148{
2149 struct xpc_msg *msg = NULL;
2150 s64 get;
2151
2152
2153 do {
2154 if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
2155 break;
2156 }
2157
2158 get = (volatile s64) ch->w_local_GP.get;
2159 if (get == (volatile s64) ch->w_remote_GP.put) {
2160 break;
2161 }
2162
2163 /* There are messages waiting to be pulled and delivered.
2164 * We need to try to secure one for ourselves. We'll do this
2165 * by trying to increment w_local_GP.get and hope that no one
2166 * else beats us to it. If they do, we'll simply have
2167 * to try again for the next one.
2168 */
2169
2170 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
2171 /* we got the entry referenced by get */
2172
2173 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
2174 "partid=%d, channel=%d\n", get + 1,
2175 ch->partid, ch->number);
2176
2177 /* pull the message from the remote partition */
2178
2179 msg = xpc_pull_remote_msg(ch, get);
2180
2181 DBUG_ON(msg != NULL && msg->number != get);
2182 DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
2183 DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));
2184
2185 break;
2186 }
2187
2188 } while (1);
2189
2190 return msg;
2191}
2192
2193
2194/*
2195 * Deliver a message to its intended recipient.
2196 */
2197void
2198xpc_deliver_msg(struct xpc_channel *ch)
2199{
2200 struct xpc_msg *msg;
2201
2202
2203 if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
2204
2205 /*
2206 * This ref is taken to protect the payload itself from being
2207 * freed before the user is finished with it, which the user
2208 * indicates by calling xpc_initiate_received().
2209 */
2210 xpc_msgqueue_ref(ch);
2211
2212 atomic_inc(&ch->kthreads_active);
2213
2214 if (ch->func != NULL) {
2215 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
2216 "msg_number=%ld, partid=%d, channel=%d\n",
2217 (void *) msg, msg->number, ch->partid,
2218 ch->number);
2219
2220 /* deliver the message to its intended recipient */
2221 ch->func(xpcMsgReceived, ch->partid, ch->number,
2222 &msg->payload, ch->key);
2223
2224 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
2225 "msg_number=%ld, partid=%d, channel=%d\n",
2226 (void *) msg, msg->number, ch->partid,
2227 ch->number);
2228 }
2229
2230 atomic_dec(&ch->kthreads_active);
2231 }
2232}
2233
2234
2235/*
2236 * Now we actually acknowledge the messages that have been delivered and ack'd
2237 * by advancing the cached remote message queue's Get value and, if requested,
2238 * sending an IPI to the message sender's partition.
2239 */
2240static void
2241xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2242{
2243 struct xpc_msg *msg;
2244 s64 get = initial_get + 1;
2245 int send_IPI = 0;
2246
2247
2248 while (1) {
2249
2250 while (1) {
2251 if (get == (volatile s64) ch->w_local_GP.get) {
2252 break;
2253 }
2254
2255 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
2256 (get % ch->remote_nentries) * ch->msg_size);
2257
2258 if (!(msg->flags & XPC_M_DONE)) {
2259 break;
2260 }
2261
2262 msg_flags |= msg->flags;
2263 get++;
2264 }
2265
2266 if (get == initial_get) {
2267 /* nothing's changed */
2268 break;
2269 }
2270
2271 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
2272 initial_get) {
2273 /* someone else beat us to it */
2274 DBUG_ON((volatile s64) ch->local_GP->get <=
2275 initial_get);
2276 break;
2277 }
2278
2279 /* we just set the new value of local_GP->get */
2280
2281 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
2282 "channel=%d\n", get, ch->partid, ch->number);
2283
2284 send_IPI = (msg_flags & XPC_M_INTERRUPT);
2285
2286 /*
2287 * We need to ensure that the message referenced by
2288 * local_GP->get is not XPC_M_DONE or that local_GP->get
2289 * equals w_local_GP.get, so we'll go have a look.
2290 */
2291 initial_get = get;
2292 }
2293
2294 if (send_IPI) {
2295 xpc_IPI_send_msgrequest(ch);
2296 }
2297}
2298
2299
2300/*
2301 * Acknowledge receipt of a delivered message.
2302 *
2303 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
2304 * that sent the message.
2305 *
2306 * This function, although called by users, does not call xpc_part_ref() to
2307 * ensure that the partition infrastructure is in place. It relies on the
2308 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
2309 *
2310 * Arguments:
2311 *
2312 * partid - ID of partition to which the channel is connected.
2313 * ch_number - channel # message received on.
2314 * payload - pointer to the payload area allocated via
2315 * xpc_initiate_allocate().
2316 */
2317void
2318xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2319{
2320 struct xpc_partition *part = &xpc_partitions[partid];
2321 struct xpc_channel *ch;
2322 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2323 s64 get, msg_number = msg->number;
2324
2325
2326 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2327 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2328
2329 ch = &part->channels[ch_number];
2330
2331 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2332 (void *) msg, msg_number, ch->partid, ch->number);
2333
2334 DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
2335 msg_number % ch->remote_nentries);
2336 DBUG_ON(msg->flags & XPC_M_DONE);
2337
2338 msg->flags |= XPC_M_DONE;
2339
2340 /*
2341 * The preceding store of msg->flags must occur before the following
2342 * load of ch->local_GP->get.
2343 */
2344 mb();
2345
2346 /*
2347 * See if this message is next in line to be acknowledged as having
2348 * been delivered.
2349 */
2350 get = ch->local_GP->get;
2351 if (get == msg_number) {
2352 xpc_acknowledge_msgs(ch, get, msg->flags);
2353 }
2354
2355 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
2356 xpc_msgqueue_deref(ch);
2357}
2358
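/*
 * Illustrative sketch (not part of XPC, guarded out of the build): a channel
 * function of the shape invoked via ch->func() in xpc_deliver_msg() above,
 * consuming the delivered payload and then acknowledging it with
 * xpc_initiate_received().  It reuses struct example_payload from the
 * allocation sketch; the function name and the handling of reasons other than
 * xpcMsgReceived are assumptions made for this example.
 */
#if 0
static void
example_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
				void *data, void *key)
{
	struct example_payload *p = (struct example_payload *) data;


	if (reason != xpcMsgReceived) {
		/* connection-state callbacks carry no payload to acknowledge */
		return;
	}

	/* consume the message; copy out anything needed past this point */
	(void) p->opcode;
	(void) p->data;

	/* let XPC advance Get and, if requested, interrupt the sender */
	xpc_initiate_received(partid, ch_number, data);
}
#endif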