1 // SPDX-License-Identifier: GPL-2.0
2 /* ldc.c: Logical Domain Channel link-layer protocol driver.
7 #include <linux/kernel.h>
8 #include <linux/export.h>
9 #include <linux/slab.h>
10 #include <linux/spinlock.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/scatterlist.h>
15 #include <linux/interrupt.h>
16 #include <linux/list.h>
17 #include <linux/init.h>
18 #include <linux/bitmap.h>
19 #include <asm/iommu-common.h>
21 #include <asm/hypervisor.h>
22 #include <asm/iommu.h>
25 #include <asm/mdesc.h>
27 #define DRV_MODULE_NAME "ldc"
28 #define PFX DRV_MODULE_NAME ": "
29 #define DRV_MODULE_VERSION "1.1"
30 #define DRV_MODULE_RELDATE "July 22, 2008"
32 #define COOKIE_PGSZ_CODE 0xf000000000000000ULL
33 #define COOKIE_PGSZ_CODE_SHIFT 60ULL
36 static char version[] =
37 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39 /* Packet header layout for unreliable and reliable mode frames.
40 * When in RAW mode, packets are simply straight 64-byte payloads
55 #define LDC_VERS 0x01 /* Link Version */
56 #define LDC_RTS 0x02 /* Request To Send */
57 #define LDC_RTR 0x03 /* Ready To Receive */
58 #define LDC_RDX 0x04 /* Ready for Data eXchange */
59 #define LDC_CTRL_MSK 0x0f
63 #define LDC_FRAG_MASK 0xc0
64 #define LDC_START 0x40
70 u8 u_data[LDC_PACKET_SIZE - 8];
74 u8 r_data[LDC_PACKET_SIZE - 8 - 8];
84 /* Ordered from highest major to lowest. */
85 static struct ldc_version ver_arr[] = {
86 { .major = 1, .minor = 0 },
89 #define LDC_DEFAULT_MTU (4 * LDC_PACKET_SIZE)
90 #define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE)
95 int (*write)(struct ldc_channel *, const void *, unsigned int);
96 int (*read)(struct ldc_channel *, void *, unsigned int);
99 static const struct ldc_mode_ops raw_ops;
100 static const struct ldc_mode_ops nonraw_ops;
101 static const struct ldc_mode_ops stream_ops;
103 int ldom_domaining_enabled;
106 /* Protects ldc_unmap. */
108 struct ldc_mtable_entry *page_table;
109 struct iommu_map_table iommu_map_table;
113 /* Protects all operations that depend upon channel state. */
122 struct ldc_packet *tx_base;
123 unsigned long tx_head;
124 unsigned long tx_tail;
125 unsigned long tx_num_entries;
128 unsigned long tx_acked;
130 struct ldc_packet *rx_base;
131 unsigned long rx_head;
132 unsigned long rx_tail;
133 unsigned long rx_num_entries;
139 unsigned long chan_state;
141 struct ldc_channel_config cfg;
144 const struct ldc_mode_ops *mops;
146 struct ldc_iommu iommu;
148 struct ldc_version ver;
151 #define LDC_HS_CLOSED 0x00
152 #define LDC_HS_OPEN 0x01
153 #define LDC_HS_GOTVERS 0x02
154 #define LDC_HS_SENTRTR 0x03
155 #define LDC_HS_GOTRTR 0x04
156 #define LDC_HS_COMPLETE 0x10
159 #define LDC_FLAG_ALLOCED_QUEUES 0x01
160 #define LDC_FLAG_REGISTERED_QUEUES 0x02
161 #define LDC_FLAG_REGISTERED_IRQS 0x04
162 #define LDC_FLAG_RESET 0x10
167 #define LDC_IRQ_NAME_MAX 32
168 char rx_irq_name[LDC_IRQ_NAME_MAX];
169 char tx_irq_name[LDC_IRQ_NAME_MAX];
171 struct hlist_head mh_list;
173 struct hlist_node list;
176 #define ldcdbg(TYPE, f, a...) \
177 do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
178 printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
181 #define LDC_ABORT(lp) ldc_abort((lp), __func__)
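/* Per-channel debug output is gated by lp->cfg.debug in ldcdbg()
 * above; for example, a client that sets cfg.debug to
 * LDC_DEBUG_HS | LDC_DEBUG_STATE (the bit values accompanying
 * struct ldc_channel_config in asm/ldc.h) sees only the handshake
 * and state-transition messages.
 */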
183 static const char *state_to_str(u8 state)
186 case LDC_STATE_INVALID:
190 case LDC_STATE_BOUND:
192 case LDC_STATE_READY:
194 case LDC_STATE_CONNECTED:
201 static unsigned long __advance(unsigned long off, unsigned long num_entries)
203 off += LDC_PACKET_SIZE;
204 if (off == (num_entries * LDC_PACKET_SIZE))
210 static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
212 return __advance(off, lp->rx_num_entries);
215 static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
217 return __advance(off, lp->tx_num_entries);
220 static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
221 unsigned long *new_tail)
223 struct ldc_packet *p;
226 t = tx_advance(lp, lp->tx_tail);
227 if (t == lp->tx_head)
233 return p + (lp->tx_tail / LDC_PACKET_SIZE);
236 /* When we are in reliable or stream mode, we have to track the next packet
237 * we haven't gotten an ACK for in the TX queue using tx_acked. We have
238 * to be careful not to stomp over the queue past that point. During
239 * the handshake, we don't have TX data packets pending in the queue
240 * and that's why handshake_get_tx_packet() need not be mindful of
243 static unsigned long head_for_data(struct ldc_channel *lp)
245 if (lp->cfg.mode == LDC_MODE_STREAM)
250 static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
252 unsigned long limit, tail, new_tail, diff;
255 limit = head_for_data(lp);
257 new_tail = tx_advance(lp, tail);
258 if (new_tail == limit)
261 if (limit > new_tail)
262 diff = limit - new_tail;
265 ((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
266 diff /= LDC_PACKET_SIZE;
269 if (diff * mss < size)
275 static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
276 unsigned long *new_tail)
278 struct ldc_packet *p;
281 h = head_for_data(lp);
282 t = tx_advance(lp, lp->tx_tail);
289 return p + (lp->tx_tail / LDC_PACKET_SIZE);
292 static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
294 unsigned long orig_tail = lp->tx_tail;
298 while (limit-- > 0) {
301 err = sun4v_ldc_tx_set_qtail(lp->id, tail);
305 if (err != HV_EWOULDBLOCK) {
306 lp->tx_tail = orig_tail;
312 lp->tx_tail = orig_tail;
316 /* This just updates the head value in the hypervisor using
317 * a polling loop with a timeout. The caller takes care of
318 * updating software state representing the head change, if any.
320 static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
324 while (limit-- > 0) {
327 err = sun4v_ldc_rx_set_qhead(lp->id, head);
331 if (err != HV_EWOULDBLOCK)
340 static int send_tx_packet(struct ldc_channel *lp,
341 struct ldc_packet *p,
342 unsigned long new_tail)
344 BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));
346 return set_tx_tail(lp, new_tail);
349 static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
351 void *data, int dlen,
352 unsigned long *new_tail)
354 struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);
357 memset(p, 0, sizeof(*p));
362 memcpy(p->u.u_data, data, dlen);
367 static int start_handshake(struct ldc_channel *lp)
369 struct ldc_packet *p;
370 struct ldc_version *ver;
371 unsigned long new_tail;
375 ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
376 ver->major, ver->minor);
378 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
379 ver, sizeof(*ver), &new_tail);
381 int err = send_tx_packet(lp, p, new_tail);
383 lp->flags &= ~LDC_FLAG_RESET;
389 static int send_version_nack(struct ldc_channel *lp,
390 u16 major, u16 minor)
392 struct ldc_packet *p;
393 struct ldc_version ver;
394 unsigned long new_tail;
399 p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
400 &ver, sizeof(ver), &new_tail);
402 ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
403 ver.major, ver.minor);
405 return send_tx_packet(lp, p, new_tail);
410 static int send_version_ack(struct ldc_channel *lp,
411 struct ldc_version *vp)
413 struct ldc_packet *p;
414 unsigned long new_tail;
416 p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
417 vp, sizeof(*vp), &new_tail);
419 ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
420 vp->major, vp->minor);
422 return send_tx_packet(lp, p, new_tail);
427 static int send_rts(struct ldc_channel *lp)
429 struct ldc_packet *p;
430 unsigned long new_tail;
432 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
435 p->env = lp->cfg.mode;
439 ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
442 return send_tx_packet(lp, p, new_tail);
447 static int send_rtr(struct ldc_channel *lp)
449 struct ldc_packet *p;
450 unsigned long new_tail;
452 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
455 p->env = lp->cfg.mode;
458 ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
461 return send_tx_packet(lp, p, new_tail);
466 static int send_rdx(struct ldc_channel *lp)
468 struct ldc_packet *p;
469 unsigned long new_tail;
471 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
475 p->seqid = ++lp->snd_nxt;
476 p->u.r.ackid = lp->rcv_nxt;
478 ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
479 p->env, p->seqid, p->u.r.ackid);
481 return send_tx_packet(lp, p, new_tail);
486 static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
488 struct ldc_packet *p;
489 unsigned long new_tail;
492 p = data_get_tx_packet(lp, &new_tail);
495 memset(p, 0, sizeof(*p));
496 p->type = data_pkt->type;
498 p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
499 p->seqid = lp->snd_nxt + 1;
500 p->u.r.ackid = lp->rcv_nxt;
502 ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
503 p->type, p->ctrl, p->seqid, p->u.r.ackid);
505 err = send_tx_packet(lp, p, new_tail);
512 static int ldc_abort(struct ldc_channel *lp, const char *msg)
514 unsigned long hv_err;
516 ldcdbg(STATE, "ABORT[%s]\n", msg);
519 /* We report, but do not act upon, hypervisor errors because
520 * there really isn't much we can do if these calls fail at this point.
522 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
524 printk(KERN_ERR PFX "ldc_abort: "
525 "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
526 lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);
528 hv_err = sun4v_ldc_tx_get_state(lp->id,
533 printk(KERN_ERR PFX "ldc_abort: "
534 "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
537 hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
539 printk(KERN_ERR PFX "ldc_abort: "
540 "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
541 lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);
543 /* Refetch the RX queue state as well, because we could be invoked
544 * here in the queue processing context.
546 hv_err = sun4v_ldc_rx_get_state(lp->id,
551 printk(KERN_ERR PFX "ldc_abort: "
552 "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
558 static struct ldc_version *find_by_major(u16 major)
560 struct ldc_version *ret = NULL;
563 for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
564 struct ldc_version *v = &ver_arr[i];
565 if (v->major <= major) {
573 static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
575 struct ldc_version *vap;
578 ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
579 vp->major, vp->minor);
581 if (lp->hs_state == LDC_HS_GOTVERS) {
582 lp->hs_state = LDC_HS_OPEN;
583 memset(&lp->ver, 0, sizeof(lp->ver));
586 vap = find_by_major(vp->major);
588 err = send_version_nack(lp, 0, 0);
589 } else if (vap->major != vp->major) {
590 err = send_version_nack(lp, vap->major, vap->minor);
592 struct ldc_version ver = *vp;
593 if (ver.minor > vap->minor)
594 ver.minor = vap->minor;
595 err = send_version_ack(lp, &ver);
598 lp->hs_state = LDC_HS_GOTVERS;
602 return LDC_ABORT(lp);
607 static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
609 ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
610 vp->major, vp->minor);
612 if (lp->hs_state == LDC_HS_GOTVERS) {
613 if (lp->ver.major != vp->major ||
614 lp->ver.minor != vp->minor)
615 return LDC_ABORT(lp);
618 lp->hs_state = LDC_HS_GOTVERS;
621 return LDC_ABORT(lp);
625 static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
627 struct ldc_version *vap;
628 struct ldc_packet *p;
629 unsigned long new_tail;
631 if (vp->major == 0 && vp->minor == 0)
632 return LDC_ABORT(lp);
634 vap = find_by_major(vp->major);
636 return LDC_ABORT(lp);
638 p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
642 return LDC_ABORT(lp);
644 return send_tx_packet(lp, p, new_tail);
647 static int process_version(struct ldc_channel *lp,
648 struct ldc_packet *p)
650 struct ldc_version *vp;
652 vp = (struct ldc_version *) p->u.u_data;
656 return process_ver_info(lp, vp);
659 return process_ver_ack(lp, vp);
662 return process_ver_nack(lp, vp);
665 return LDC_ABORT(lp);
669 static int process_rts(struct ldc_channel *lp,
670 struct ldc_packet *p)
672 ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
673 p->stype, p->seqid, p->env);
675 if (p->stype != LDC_INFO ||
676 lp->hs_state != LDC_HS_GOTVERS ||
677 p->env != lp->cfg.mode)
678 return LDC_ABORT(lp);
680 lp->snd_nxt = p->seqid;
681 lp->rcv_nxt = p->seqid;
682 lp->hs_state = LDC_HS_SENTRTR;
684 return LDC_ABORT(lp);
689 static int process_rtr(struct ldc_channel *lp,
690 struct ldc_packet *p)
692 ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
693 p->stype, p->seqid, p->env);
695 if (p->stype != LDC_INFO ||
696 p->env != lp->cfg.mode)
697 return LDC_ABORT(lp);
699 lp->snd_nxt = p->seqid;
700 lp->hs_state = LDC_HS_COMPLETE;
701 ldc_set_state(lp, LDC_STATE_CONNECTED);
707 static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
709 return lp->rcv_nxt + 1 == seqid;
712 static int process_rdx(struct ldc_channel *lp,
713 struct ldc_packet *p)
715 ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
716 p->stype, p->seqid, p->env, p->u.r.ackid);
718 if (p->stype != LDC_INFO ||
719 !(rx_seq_ok(lp, p->seqid)))
720 return LDC_ABORT(lp);
722 lp->rcv_nxt = p->seqid;
724 lp->hs_state = LDC_HS_COMPLETE;
725 ldc_set_state(lp, LDC_STATE_CONNECTED);
730 static int process_control_frame(struct ldc_channel *lp,
731 struct ldc_packet *p)
735 return process_version(lp, p);
738 return process_rts(lp, p);
741 return process_rtr(lp, p);
744 return process_rdx(lp, p);
747 return LDC_ABORT(lp);
751 static int process_error_frame(struct ldc_channel *lp,
752 struct ldc_packet *p)
754 return LDC_ABORT(lp);
757 static int process_data_ack(struct ldc_channel *lp,
758 struct ldc_packet *ack)
760 unsigned long head = lp->tx_acked;
761 u32 ackid = ack->u.r.ackid;
764 struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
766 head = tx_advance(lp, head);
768 if (p->seqid == ackid) {
772 if (head == lp->tx_tail)
773 return LDC_ABORT(lp);
779 static void send_events(struct ldc_channel *lp, unsigned int event_mask)
781 if (event_mask & LDC_EVENT_RESET)
782 lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
783 if (event_mask & LDC_EVENT_UP)
784 lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
785 if (event_mask & LDC_EVENT_DATA_READY)
786 lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
789 static irqreturn_t ldc_rx(int irq, void *dev_id)
791 struct ldc_channel *lp = dev_id;
792 unsigned long orig_state, flags;
793 unsigned int event_mask;
795 spin_lock_irqsave(&lp->lock, flags);
797 orig_state = lp->chan_state;
799 /* We should probably check for hypervisor errors here and
800 * reset the LDC channel if we get one.
802 sun4v_ldc_rx_get_state(lp->id,
807 ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
808 orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
812 if (lp->cfg.mode == LDC_MODE_RAW &&
813 lp->chan_state == LDC_CHANNEL_UP) {
814 lp->hs_state = LDC_HS_COMPLETE;
815 ldc_set_state(lp, LDC_STATE_CONNECTED);
818 * Generate an LDC_EVENT_UP event if the channel
819 * was not already up.
821 if (orig_state != LDC_CHANNEL_UP) {
822 event_mask |= LDC_EVENT_UP;
823 orig_state = lp->chan_state;
827 /* If we are in reset state, flush the RX queue and ignore
830 if (lp->flags & LDC_FLAG_RESET) {
831 (void) ldc_rx_reset(lp);
835 /* Once we finish the handshake, we let the ldc_read()
836 * paths do all of the control frame and state management.
837 * Just trigger the callback.
839 if (lp->hs_state == LDC_HS_COMPLETE) {
841 if (lp->chan_state != orig_state) {
842 unsigned int event = LDC_EVENT_RESET;
844 if (lp->chan_state == LDC_CHANNEL_UP)
845 event = LDC_EVENT_UP;
849 if (lp->rx_head != lp->rx_tail)
850 event_mask |= LDC_EVENT_DATA_READY;
855 if (lp->chan_state != orig_state)
858 while (lp->rx_head != lp->rx_tail) {
859 struct ldc_packet *p;
863 p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
867 err = process_control_frame(lp, p);
873 event_mask |= LDC_EVENT_DATA_READY;
878 err = process_error_frame(lp, p);
890 new += LDC_PACKET_SIZE;
891 if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
895 err = __set_rx_head(lp, new);
897 (void) LDC_ABORT(lp);
900 if (lp->hs_state == LDC_HS_COMPLETE)
901 goto handshake_complete;
905 spin_unlock_irqrestore(&lp->lock, flags);
907 send_events(lp, event_mask);
912 static irqreturn_t ldc_tx(int irq, void *dev_id)
914 struct ldc_channel *lp = dev_id;
915 unsigned long flags, orig_state;
916 unsigned int event_mask = 0;
918 spin_lock_irqsave(&lp->lock, flags);
920 orig_state = lp->chan_state;
922 /* We should probably check for hypervisor errors here and
923 * reset the LDC channel if we get one.
925 sun4v_ldc_tx_get_state(lp->id,
930 ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
931 orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
933 if (lp->cfg.mode == LDC_MODE_RAW &&
934 lp->chan_state == LDC_CHANNEL_UP) {
935 lp->hs_state = LDC_HS_COMPLETE;
936 ldc_set_state(lp, LDC_STATE_CONNECTED);
939 * Generate an LDC_EVENT_UP event if the channel
940 * was not already up.
942 if (orig_state != LDC_CHANNEL_UP) {
943 event_mask |= LDC_EVENT_UP;
944 orig_state = lp->chan_state;
948 spin_unlock_irqrestore(&lp->lock, flags);
950 send_events(lp, event_mask);
955 /* XXX ldc_alloc() and ldc_free() need to run under a mutex so
956 * XXX that addition and removal from the ldc_channel_list has
957 * XXX atomicity, otherwise the __ldc_channel_exists() check is
958 * XXX totally pointless as another thread can slip into ldc_alloc()
959 * XXX and add a channel with the same ID. There also needs to be
960 * XXX a spinlock for ldc_channel_list.
962 static HLIST_HEAD(ldc_channel_list);
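/* A minimal sketch of the serialization the XXX comment above asks
 * for; ldc_channel_mutex and ldc_channel_add() are hypothetical
 * names, not part of this driver.
 */
#if 0	/* illustrative sketch only */
static DEFINE_MUTEX(ldc_channel_mutex);		/* needs <linux/mutex.h> */

static int ldc_channel_add(struct ldc_channel *lp, unsigned long id)
{
	int err = 0;

	mutex_lock(&ldc_channel_mutex);
	if (__ldc_channel_exists(id))
		err = -EEXIST;	/* lost the race, ID already taken */
	else
		hlist_add_head(&lp->list, &ldc_channel_list);
	mutex_unlock(&ldc_channel_mutex);

	return err;
}
#endif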
964 static int __ldc_channel_exists(unsigned long id)
966 struct ldc_channel *lp;
968 hlist_for_each_entry(lp, &ldc_channel_list, list) {
975 static int alloc_queue(const char *name, unsigned long num_entries,
976 struct ldc_packet **base, unsigned long *ra)
978 unsigned long size, order;
981 size = num_entries * LDC_PACKET_SIZE;
982 order = get_order(size);
984 q = (void *) __get_free_pages(GFP_KERNEL, order);
986 printk(KERN_ERR PFX "Alloc of %s queue failed with "
987 "size=%lu order=%lu\n", name, size, order);
991 memset(q, 0, PAGE_SIZE << order);
999 static void free_queue(unsigned long num_entries, struct ldc_packet *q)
1001 unsigned long size, order;
1006 size = num_entries * LDC_PACKET_SIZE;
1007 order = get_order(size);
1009 free_pages((unsigned long)q, order);
1012 static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
1014 u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
1015 /* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
1017 cookie &= ~COOKIE_PGSZ_CODE;
1019 return (cookie >> (13ULL + (szcode * 3ULL)));
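/* For example, assuming the 8K base page size (size code 0, hence
 * the 13-bit base shift above), a cookie of 0x000000000000a040 has
 * szcode 0 and indexes mapping-table entry 0xa040 >> 13 == 5; the
 * low bits (0x40) are the within-page offset that make_cookie()
 * folded in.
 */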
1022 static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
1023 unsigned long entry, unsigned long npages)
1025 struct ldc_mtable_entry *base;
1026 unsigned long i, shift;
1028 shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
1029 base = iommu->page_table + entry;
1030 for (i = 0; i < npages; i++) {
1032 sun4v_ldc_revoke(id, cookie + (i << shift),
1038 /* XXX Make this configurable... XXX */
1039 #define LDC_IOTABLE_SIZE (8 * 1024)
1041 static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
1043 unsigned long sz, num_tsb_entries, tsbsize, order;
1044 struct ldc_iommu *ldc_iommu = &lp->iommu;
1045 struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
1046 struct ldc_mtable_entry *table;
1047 unsigned long hv_err;
1050 num_tsb_entries = LDC_IOTABLE_SIZE;
1051 tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
1052 spin_lock_init(&ldc_iommu->lock);
1054 sz = num_tsb_entries / 8;
1055 sz = (sz + 7UL) & ~7UL;
1056 iommu->map = kzalloc(sz, GFP_KERNEL);
1058 printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
1061 iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
1062 NULL, false /* no large pool */,
1064 true /* skip span boundary check */);
1066 order = get_order(tsbsize);
1068 table = (struct ldc_mtable_entry *)
1069 __get_free_pages(GFP_KERNEL, order);
1072 printk(KERN_ERR PFX "Alloc of MTE table failed, "
1073 "size=%lu order=%lu\n", tsbsize, order);
1077 memset(table, 0, PAGE_SIZE << order);
1079 ldc_iommu->page_table = table;
1081 hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
1085 goto out_free_table;
1090 free_pages((unsigned long) table, order);
1091 ldc_iommu->page_table = NULL;
1100 static void ldc_iommu_release(struct ldc_channel *lp)
1102 struct ldc_iommu *ldc_iommu = &lp->iommu;
1103 struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
1104 unsigned long num_tsb_entries, tsbsize, order;
1106 (void) sun4v_ldc_set_map_table(lp->id, 0, 0);
1108 num_tsb_entries = iommu->poolsize * iommu->nr_pools;
1109 tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
1110 order = get_order(tsbsize);
1112 free_pages((unsigned long) ldc_iommu->page_table, order);
1113 ldc_iommu->page_table = NULL;
1119 struct ldc_channel *ldc_alloc(unsigned long id,
1120 const struct ldc_channel_config *cfgp,
1124 struct ldc_channel *lp;
1125 const struct ldc_mode_ops *mops;
1126 unsigned long dummy1, dummy2, hv_err;
1131 if (!ldom_domaining_enabled)
1140 switch (cfgp->mode) {
1143 mss = LDC_PACKET_SIZE;
1146 case LDC_MODE_UNRELIABLE:
1148 mss = LDC_PACKET_SIZE - 8;
1151 case LDC_MODE_STREAM:
1153 mss = LDC_PACKET_SIZE - 8 - 8;
1160 if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
1163 hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
1165 if (hv_err == HV_ECHANNEL)
1169 if (__ldc_channel_exists(id))
1174 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
1179 spin_lock_init(&lp->lock);
1183 err = ldc_iommu_init(name, lp);
1192 lp->cfg.mtu = LDC_DEFAULT_MTU;
1194 if (lp->cfg.mode == LDC_MODE_STREAM) {
1195 mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
1198 goto out_free_iommu;
1200 lp->mssbuf = mssbuf;
1203 lp->event_arg = event_arg;
1205 /* XXX allow setting via ldc_channel_config to override defaults
1206 * XXX or use some formula based upon mtu
1208 lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
1209 lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
1211 err = alloc_queue("TX", lp->tx_num_entries,
1212 &lp->tx_base, &lp->tx_ra);
1214 goto out_free_mssbuf;
1216 err = alloc_queue("RX", lp->rx_num_entries,
1217 &lp->rx_base, &lp->rx_ra);
1221 lp->flags |= LDC_FLAG_ALLOCED_QUEUES;
1223 lp->hs_state = LDC_HS_CLOSED;
1224 ldc_set_state(lp, LDC_STATE_INIT);
1226 INIT_HLIST_NODE(&lp->list);
1227 hlist_add_head(&lp->list, &ldc_channel_list);
1229 INIT_HLIST_HEAD(&lp->mh_list);
1231 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
1232 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
1234 err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
1235 lp->rx_irq_name, lp);
1239 err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
1240 lp->tx_irq_name, lp);
1242 free_irq(lp->cfg.rx_irq, lp);
1249 free_queue(lp->tx_num_entries, lp->tx_base);
1255 ldc_iommu_release(lp);
1261 return ERR_PTR(err);
1263 EXPORT_SYMBOL(ldc_alloc);
1265 void ldc_unbind(struct ldc_channel *lp)
1267 if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
1268 free_irq(lp->cfg.rx_irq, lp);
1269 free_irq(lp->cfg.tx_irq, lp);
1270 lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
1273 if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
1274 sun4v_ldc_tx_qconf(lp->id, 0, 0);
1275 sun4v_ldc_rx_qconf(lp->id, 0, 0);
1276 lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
1278 if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
1279 free_queue(lp->tx_num_entries, lp->tx_base);
1280 free_queue(lp->rx_num_entries, lp->rx_base);
1281 lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
1284 ldc_set_state(lp, LDC_STATE_INIT);
1286 EXPORT_SYMBOL(ldc_unbind);
1288 void ldc_free(struct ldc_channel *lp)
1291 hlist_del(&lp->list);
1293 ldc_iommu_release(lp);
1297 EXPORT_SYMBOL(ldc_free);
1299 /* Bind the channel. This registers the LDC queues with
1300 * the hypervisor and puts the channel into a pseudo-listening
1301 * state. This does not initiate a handshake, ldc_connect() does
1304 int ldc_bind(struct ldc_channel *lp)
1306 unsigned long hv_err, flags;
1309 if (lp->state != LDC_STATE_INIT)
1312 spin_lock_irqsave(&lp->lock, flags);
1314 enable_irq(lp->cfg.rx_irq);
1315 enable_irq(lp->cfg.tx_irq);
1317 lp->flags |= LDC_FLAG_REGISTERED_IRQS;
1320 hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
1324 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
1328 hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
1332 hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
1336 lp->flags |= LDC_FLAG_REGISTERED_QUEUES;
1338 hv_err = sun4v_ldc_tx_get_state(lp->id,
1346 lp->tx_acked = lp->tx_head;
1348 lp->hs_state = LDC_HS_OPEN;
1349 ldc_set_state(lp, LDC_STATE_BOUND);
1351 if (lp->cfg.mode == LDC_MODE_RAW) {
1353 * There is no handshake in RAW mode, so handshake
1356 lp->hs_state = LDC_HS_COMPLETE;
1359 spin_unlock_irqrestore(&lp->lock, flags);
1364 lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
1365 sun4v_ldc_rx_qconf(lp->id, 0, 0);
1368 sun4v_ldc_tx_qconf(lp->id, 0, 0);
1371 lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
1372 free_irq(lp->cfg.tx_irq, lp);
1373 free_irq(lp->cfg.rx_irq, lp);
1375 spin_unlock_irqrestore(&lp->lock, flags);
1379 EXPORT_SYMBOL(ldc_bind);
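/* A minimal client-side sketch of the alloc/bind/connect lifecycle
 * described above.  example_event(), example_open_channel() and the
 * config values are illustrative assumptions, not part of this
 * driver; cfg.rx_irq and cfg.tx_irq must also be filled in from the
 * device's resources.
 */
#if 0	/* illustrative sketch only */
static void example_event(void *arg, int event)
{
	/* Handle LDC_EVENT_UP, LDC_EVENT_DATA_READY, LDC_EVENT_RESET. */
}

static int example_open_channel(unsigned long id, void *arg)
{
	struct ldc_channel_config example_cfg = {
		.event	= example_event,
		.mtu	= 0,		/* 0: ldc_alloc() picks LDC_DEFAULT_MTU */
		.mode	= LDC_MODE_UNRELIABLE,
	};
	struct ldc_channel *lp;
	int err;

	lp = ldc_alloc(id, &example_cfg, arg, "EXAMPLE");
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	err = ldc_bind(lp);		/* register queues, pseudo-listen */
	if (!err)
		err = ldc_connect(lp);	/* start the handshake */
	if (err)
		ldc_free(lp);
	return err;
}
#endif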
1381 int ldc_connect(struct ldc_channel *lp)
1383 unsigned long flags;
1386 if (lp->cfg.mode == LDC_MODE_RAW)
1389 spin_lock_irqsave(&lp->lock, flags);
1391 if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
1392 !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
1393 lp->hs_state != LDC_HS_OPEN)
1394 err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
1396 err = start_handshake(lp);
1398 spin_unlock_irqrestore(&lp->lock, flags);
1402 EXPORT_SYMBOL(ldc_connect);
1404 int ldc_disconnect(struct ldc_channel *lp)
1406 unsigned long hv_err, flags;
1409 if (lp->cfg.mode == LDC_MODE_RAW)
1412 if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
1413 !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
1416 spin_lock_irqsave(&lp->lock, flags);
1419 hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
1423 hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
1427 hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
1431 hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
1435 ldc_set_state(lp, LDC_STATE_BOUND);
1436 lp->hs_state = LDC_HS_OPEN;
1437 lp->flags |= LDC_FLAG_RESET;
1439 spin_unlock_irqrestore(&lp->lock, flags);
1444 sun4v_ldc_tx_qconf(lp->id, 0, 0);
1445 sun4v_ldc_rx_qconf(lp->id, 0, 0);
1446 free_irq(lp->cfg.tx_irq, lp);
1447 free_irq(lp->cfg.rx_irq, lp);
1448 lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
1449 LDC_FLAG_REGISTERED_QUEUES);
1450 ldc_set_state(lp, LDC_STATE_INIT);
1452 spin_unlock_irqrestore(&lp->lock, flags);
1456 EXPORT_SYMBOL(ldc_disconnect);
1458 int ldc_state(struct ldc_channel *lp)
1462 EXPORT_SYMBOL(ldc_state);
1464 void ldc_set_state(struct ldc_channel *lp, u8 state)
1466 ldcdbg(STATE, "STATE (%s) --> (%s)\n",
1467 state_to_str(lp->state),
1468 state_to_str(state));
1472 EXPORT_SYMBOL(ldc_set_state);
1474 int ldc_mode(struct ldc_channel *lp)
1476 return lp->cfg.mode;
1478 EXPORT_SYMBOL(ldc_mode);
1480 int ldc_rx_reset(struct ldc_channel *lp)
1482 return __set_rx_head(lp, lp->rx_tail);
1484 EXPORT_SYMBOL(ldc_rx_reset);
1486 void __ldc_print(struct ldc_channel *lp, const char *caller)
1488 pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
1489 "\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
1490 "\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
1491 "\trcv_nxt=%u snd_nxt=%u\n",
1492 caller, lp->id, lp->flags, state_to_str(lp->state),
1493 lp->chan_state, lp->hs_state,
1494 lp->rx_head, lp->rx_tail, lp->rx_num_entries,
1495 lp->tx_head, lp->tx_tail, lp->tx_num_entries,
1496 lp->rcv_nxt, lp->snd_nxt);
1498 EXPORT_SYMBOL(__ldc_print);
1500 static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
1502 struct ldc_packet *p;
1503 unsigned long new_tail, hv_err;
1506 hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
1508 if (unlikely(hv_err))
1511 if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
1512 return LDC_ABORT(lp);
1514 if (size > LDC_PACKET_SIZE)
1517 p = data_get_tx_packet(lp, &new_tail);
1521 memcpy(p, buf, size);
1523 err = send_tx_packet(lp, p, new_tail);
1530 static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
1532 struct ldc_packet *p;
1533 unsigned long hv_err, new;
1536 if (size < LDC_PACKET_SIZE)
1539 hv_err = sun4v_ldc_rx_get_state(lp->id,
1544 return LDC_ABORT(lp);
1546 if (lp->chan_state == LDC_CHANNEL_DOWN ||
1547 lp->chan_state == LDC_CHANNEL_RESETTING)
1550 if (lp->rx_head == lp->rx_tail)
1553 p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
1554 memcpy(buf, p, LDC_PACKET_SIZE);
1556 new = rx_advance(lp, lp->rx_head);
1559 err = __set_rx_head(lp, new);
1563 err = LDC_PACKET_SIZE;
1568 static const struct ldc_mode_ops raw_ops = {
1573 static int write_nonraw(struct ldc_channel *lp, const void *buf,
1576 unsigned long hv_err, tail;
1577 unsigned int copied;
1581 hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
1583 if (unlikely(hv_err))
1586 if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
1587 return LDC_ABORT(lp);
1589 if (!tx_has_space_for(lp, size))
1595 while (copied < size) {
1596 struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
1597 u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
1603 p->stype = LDC_INFO;
1606 data_len = size - copied;
1607 if (data_len > lp->mss)
1610 BUG_ON(data_len > LDC_LEN);
1612 p->env = (data_len |
1613 (copied == 0 ? LDC_START : 0) |
1614 (data_len == size - copied ? LDC_STOP : 0));
1618 ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
1625 memcpy(data, buf, data_len);
1629 tail = tx_advance(lp, tail);
1632 err = set_tx_tail(lp, tail);
1641 static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
1642 struct ldc_packet *first_frag)
1647 lp->rcv_nxt = first_frag->seqid - 1;
1649 err = send_data_nack(lp, p);
1653 err = ldc_rx_reset(lp);
1655 return LDC_ABORT(lp);
1660 static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
1662 if (p->stype & LDC_ACK) {
1663 int err = process_data_ack(lp, p);
1667 if (p->stype & LDC_NACK)
1668 return LDC_ABORT(lp);
1673 static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
1675 unsigned long dummy;
1678 ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
1679 cur_head, lp->rx_head, lp->rx_tail);
1680 while (limit-- > 0) {
1681 unsigned long hv_err;
1683 hv_err = sun4v_ldc_rx_get_state(lp->id,
1688 return LDC_ABORT(lp);
1690 if (lp->chan_state == LDC_CHANNEL_DOWN ||
1691 lp->chan_state == LDC_CHANNEL_RESETTING)
1694 if (cur_head != lp->rx_tail) {
1695 ldcdbg(DATA, "DATA WAIT DONE "
1696 "head[%lx] tail[%lx] chan_state[%lx]\n",
1697 dummy, lp->rx_tail, lp->chan_state);
1706 static int rx_set_head(struct ldc_channel *lp, unsigned long head)
1708 int err = __set_rx_head(lp, head);
1711 return LDC_ABORT(lp);
1717 static void send_data_ack(struct ldc_channel *lp)
1719 unsigned long new_tail;
1720 struct ldc_packet *p;
1722 p = data_get_tx_packet(lp, &new_tail);
1726 memset(p, 0, sizeof(*p));
1730 p->seqid = lp->snd_nxt + 1;
1731 p->u.r.ackid = lp->rcv_nxt;
1733 err = send_tx_packet(lp, p, new_tail);
1739 static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
1741 struct ldc_packet *first_frag;
1742 unsigned long hv_err, new;
1745 hv_err = sun4v_ldc_rx_get_state(lp->id,
1750 return LDC_ABORT(lp);
1752 if (lp->chan_state == LDC_CHANNEL_DOWN ||
1753 lp->chan_state == LDC_CHANNEL_RESETTING)
1756 if (lp->rx_head == lp->rx_tail)
1763 struct ldc_packet *p;
1766 BUG_ON(new == lp->rx_tail);
1767 p = lp->rx_base + (new / LDC_PACKET_SIZE);
1769 ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
1779 if (unlikely(!rx_seq_ok(lp, p->seqid))) {
1780 err = rx_bad_seq(lp, p, first_frag);
1785 if (p->type & LDC_CTRL) {
1786 err = process_control_frame(lp, p);
1792 lp->rcv_nxt = p->seqid;
1795 * If this is a control-only packet, there is nothing
1796 * else to do but advance the rx queue since the packet
1797 * was already processed above.
1799 if (!(p->type & LDC_DATA)) {
1800 new = rx_advance(lp, new);
1803 if (p->stype & (LDC_ACK | LDC_NACK)) {
1804 err = data_ack_nack(lp, p);
1808 if (!(p->stype & LDC_INFO)) {
1809 new = rx_advance(lp, new);
1810 err = rx_set_head(lp, new);
1816 pkt_len = p->env & LDC_LEN;
1818 /* Every initial packet starts with the START bit set.
1820 * Singleton packets will have both START+STOP set.
1822 * Fragments will have START set in the first frame, STOP
1823 * set in the last frame, and neither bit set in middle
1824 * frames of the packet.
1826 * Therefore if we are at the beginning of a packet and
1827 * we don't see START, or we are in the middle of a fragmented
1828 * packet and do see START, we are unsynchronized and should
1829 * flush the RX queue.
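 *
 * For example, given the 64-byte LDC_PACKET_SIZE and the
 * unreliable-mode mss of 56 set up in ldc_alloc(), a 200-byte
 * write arrives as four frames carrying 56+56+56+32 bytes, with
 * START set on the first and STOP on the last; a 40-byte write
 * is a single frame with both START and STOP set.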
1831 if ((first_frag == NULL && !(p->env & LDC_START)) ||
1832 (first_frag != NULL && (p->env & LDC_START))) {
1834 new = rx_advance(lp, new);
1836 err = rx_set_head(lp, new);
1846 if (pkt_len > size - copied) {
1847 /* User didn't give us a big enough buffer,
1848 * what to do? This is a pretty serious error.
1850 * Since we haven't updated the RX ring head to
1851 * consume any of the packets, signal the error
1852 * to the user and just leave the RX ring alone.
1854 * This seems the best behavior because this allows
1855 * a user of the LDC layer to start with a small
1856 * RX buffer for ldc_read() calls and use -EMSGSIZE
1857 * as a cue to enlarge its read buffer.
1863 /* Ok, we are gonna eat this one. */
1864 new = rx_advance(lp, new);
1867 (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
1868 p->u.u_data : p->u.r.r_data), pkt_len);
1872 if (p->env & LDC_STOP)
1876 if (new == lp->rx_tail) {
1877 err = rx_data_wait(lp, new);
1884 err = rx_set_head(lp, new);
1886 if (err && first_frag)
1887 lp->rcv_nxt = first_frag->seqid - 1;
1891 if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
1898 static const struct ldc_mode_ops nonraw_ops = {
1899 .write = write_nonraw,
1900 .read = read_nonraw,
1903 static int write_stream(struct ldc_channel *lp, const void *buf,
1906 if (size > lp->cfg.mtu)
1908 return write_nonraw(lp, buf, size);
1911 static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
1913 if (!lp->mssbuf_len) {
1914 int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
1918 lp->mssbuf_len = err;
1922 if (size > lp->mssbuf_len)
1923 size = lp->mssbuf_len;
1924 memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);
1926 lp->mssbuf_off += size;
1927 lp->mssbuf_len -= size;
1932 static const struct ldc_mode_ops stream_ops = {
1933 .write = write_stream,
1934 .read = read_stream,
1937 int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
1939 unsigned long flags;
1948 spin_lock_irqsave(&lp->lock, flags);
1950 if (lp->hs_state != LDC_HS_COMPLETE)
1953 err = lp->mops->write(lp, buf, size);
1955 spin_unlock_irqrestore(&lp->lock, flags);
1959 EXPORT_SYMBOL(ldc_write);
1961 int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
1963 unsigned long flags;
1966 ldcdbg(RX, "%s: entered size=%d\n", __func__, size);
1974 spin_lock_irqsave(&lp->lock, flags);
1976 if (lp->hs_state != LDC_HS_COMPLETE)
1979 err = lp->mops->read(lp, buf, size);
1981 spin_unlock_irqrestore(&lp->lock, flags);
1983 ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
1984 lp->cfg.mode, lp->rx_head, lp->rx_tail, err);
1988 EXPORT_SYMBOL(ldc_read);
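/* A minimal caller-side sketch of the -EMSGSIZE convention noted in
 * read_nonraw(): since the RX ring is left untouched on that error,
 * the reader can simply enlarge its buffer and retry.  example_recv()
 * and the doubling policy are illustrative.
 */
#if 0	/* illustrative sketch only */
static int example_recv(struct ldc_channel *lp, void **bufp,
			unsigned int *lenp)
{
	int err;

	for (;;) {
		err = ldc_read(lp, *bufp, *lenp);
		if (err != -EMSGSIZE)
			return err;	/* data length or a real error */

		kfree(*bufp);
		*lenp *= 2;
		*bufp = kmalloc(*lenp, GFP_KERNEL);
		if (!*bufp)
			return -ENOMEM;
	}
}
#endif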
1990 static u64 pagesize_code(void)
1992 switch (PAGE_SIZE) {
1994 case (8ULL * 1024ULL):
1996 case (64ULL * 1024ULL):
1998 case (512ULL * 1024ULL):
2000 case (4ULL * 1024ULL * 1024ULL):
2002 case (32ULL * 1024ULL * 1024ULL):
2004 case (256ULL * 1024ULL * 1024ULL):
2009 static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
2011 return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
2012 (index << PAGE_SHIFT) |
2017 static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
2018 unsigned long npages)
2022 entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
2023 npages, NULL, (unsigned long)-1, 0);
2024 if (unlikely(entry == IOMMU_ERROR_CODE))
2027 return iommu->page_table + entry;
2030 static u64 perm_to_mte(unsigned int map_perm)
2034 mte_base = pagesize_code();
2036 if (map_perm & LDC_MAP_SHADOW) {
2037 if (map_perm & LDC_MAP_R)
2038 mte_base |= LDC_MTE_COPY_R;
2039 if (map_perm & LDC_MAP_W)
2040 mte_base |= LDC_MTE_COPY_W;
2042 if (map_perm & LDC_MAP_DIRECT) {
2043 if (map_perm & LDC_MAP_R)
2044 mte_base |= LDC_MTE_READ;
2045 if (map_perm & LDC_MAP_W)
2046 mte_base |= LDC_MTE_WRITE;
2047 if (map_perm & LDC_MAP_X)
2048 mte_base |= LDC_MTE_EXEC;
2050 if (map_perm & LDC_MAP_IO) {
2051 if (map_perm & LDC_MAP_R)
2052 mte_base |= LDC_MTE_IOMMU_R;
2053 if (map_perm & LDC_MAP_W)
2054 mte_base |= LDC_MTE_IOMMU_W;
2060 static int pages_in_region(unsigned long base, long len)
2065 unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;
2067 len -= (new - base);
2075 struct cookie_state {
2076 struct ldc_mtable_entry *page_table;
2077 struct ldc_trans_cookie *cookies;
2084 static void fill_cookies(struct cookie_state *sp, unsigned long pa,
2085 unsigned long off, unsigned long len)
2088 unsigned long tlen, new = pa + PAGE_SIZE;
2091 sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;
2095 tlen = PAGE_SIZE - off;
2099 this_cookie = make_cookie(sp->pte_idx,
2100 pagesize_code(), off);
2104 if (this_cookie == sp->prev_cookie) {
2105 sp->cookies[sp->nc - 1].cookie_size += tlen;
2107 sp->cookies[sp->nc].cookie_addr = this_cookie;
2108 sp->cookies[sp->nc].cookie_size = tlen;
2111 sp->prev_cookie = this_cookie + tlen;
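/* Note the coalescing above: prev_cookie records where the previous
 * cookie range ended (this_cookie + tlen), so a page whose cookie
 * begins exactly there grows cookies[nc - 1] instead of consuming a
 * new cookie slot.  Consecutive mapping-table entries yield cookies
 * one page apart, so a multi-page region mapped into consecutive
 * entries collapses into a single cookie.
 */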
2120 static int sg_count_one(struct scatterlist *sg)
2122 unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
2123 long len = sg->length;
2125 if ((sg->offset | len) & (8UL - 1))
2128 return pages_in_region(base + sg->offset, len);
2131 static int sg_count_pages(struct scatterlist *sg, int num_sg)
2137 for (i = 0; i < num_sg; i++) {
2138 int err = sg_count_one(sg + i);
2147 int ldc_map_sg(struct ldc_channel *lp,
2148 struct scatterlist *sg, int num_sg,
2149 struct ldc_trans_cookie *cookies, int ncookies,
2150 unsigned int map_perm)
2152 unsigned long i, npages;
2153 struct ldc_mtable_entry *base;
2154 struct cookie_state state;
2155 struct ldc_iommu *iommu;
2157 struct scatterlist *s;
2159 if (map_perm & ~LDC_MAP_ALL)
2162 err = sg_count_pages(sg, num_sg);
2172 base = alloc_npages(iommu, npages);
2177 state.page_table = iommu->page_table;
2178 state.cookies = cookies;
2179 state.mte_base = perm_to_mte(map_perm);
2180 state.prev_cookie = ~(u64)0;
2181 state.pte_idx = (base - iommu->page_table);
2184 for_each_sg(sg, s, num_sg, i) {
2185 fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT,
2186 s->offset, s->length);
2191 EXPORT_SYMBOL(ldc_map_sg);
2193 int ldc_map_single(struct ldc_channel *lp,
2194 void *buf, unsigned int len,
2195 struct ldc_trans_cookie *cookies, int ncookies,
2196 unsigned int map_perm)
2198 unsigned long npages, pa;
2199 struct ldc_mtable_entry *base;
2200 struct cookie_state state;
2201 struct ldc_iommu *iommu;
2203 if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
2207 if ((pa | len) & (8UL - 1))
2210 npages = pages_in_region(pa, len);
2214 base = alloc_npages(iommu, npages);
2219 state.page_table = iommu->page_table;
2220 state.cookies = cookies;
2221 state.mte_base = perm_to_mte(map_perm);
2222 state.prev_cookie = ~(u64)0;
2223 state.pte_idx = (base - iommu->page_table);
2225 fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
2226 BUG_ON(state.nc > ncookies);
2230 EXPORT_SYMBOL(ldc_map_single);
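/* A minimal sketch of exporting a local buffer via ldc_map_single();
 * example_export() and EXAMPLE_NCOOKIES are illustrative.  Both the
 * buffer address and the length must be 8-byte aligned, per the
 * check above.
 */
#if 0	/* illustrative sketch only */
#define EXAMPLE_NCOOKIES	2

static int example_export(struct ldc_channel *lp, void *buf,
			  unsigned int len,
			  struct ldc_trans_cookie *cookies)
{
	/* Returns the number of cookies used, or a negative errno. */
	return ldc_map_single(lp, buf, len, cookies, EXAMPLE_NCOOKIES,
			      LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
}
#endif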
2233 static void free_npages(unsigned long id, struct ldc_iommu *iommu,
2234 u64 cookie, u64 size)
2236 unsigned long npages, entry;
2238 npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
2240 entry = ldc_cookie_to_index(cookie, iommu);
2241 ldc_demap(iommu, id, cookie, entry, npages);
2242 iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
2245 void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
2248 struct ldc_iommu *iommu = &lp->iommu;
2250 unsigned long flags;
2252 spin_lock_irqsave(&iommu->lock, flags);
2253 for (i = 0; i < ncookies; i++) {
2254 u64 addr = cookies[i].cookie_addr;
2255 u64 size = cookies[i].cookie_size;
2257 free_npages(lp->id, iommu, addr, size);
2259 spin_unlock_irqrestore(&iommu->lock, flags);
2261 EXPORT_SYMBOL(ldc_unmap);
2263 int ldc_copy(struct ldc_channel *lp, int copy_dir,
2264 void *buf, unsigned int len, unsigned long offset,
2265 struct ldc_trans_cookie *cookies, int ncookies)
2267 unsigned int orig_len;
2271 if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
2272 printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
2278 if ((ra | len | offset) & (8UL - 1)) {
2279 printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
2280 "ra[%lx] len[%x] offset[%lx]\n",
2281 lp->id, ra, len, offset);
2285 if (lp->hs_state != LDC_HS_COMPLETE ||
2286 (lp->flags & LDC_FLAG_RESET)) {
2287 printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
2288 "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
2293 for (i = 0; i < ncookies; i++) {
2294 unsigned long cookie_raddr = cookies[i].cookie_addr;
2295 unsigned long this_len = cookies[i].cookie_size;
2296 unsigned long actual_len;
2298 if (unlikely(offset)) {
2299 unsigned long this_off = offset;
2301 if (this_off > this_len)
2302 this_off = this_len;
2305 this_len -= this_off;
2308 cookie_raddr += this_off;
2315 unsigned long hv_err;
2317 hv_err = sun4v_ldc_copy(lp->id, copy_dir,
2319 this_len, &actual_len);
2320 if (unlikely(hv_err)) {
2321 printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
2324 if (lp->hs_state != LDC_HS_COMPLETE ||
2325 (lp->flags & LDC_FLAG_RESET))
2331 cookie_raddr += actual_len;
2334 if (actual_len == this_len)
2337 this_len -= actual_len;
2344 /* It is caller policy what to do about short copies.
2345 * For example, a networking driver can declare the
2346 * packet a runt and drop it.
2349 return orig_len - len;
2351 EXPORT_SYMBOL(ldc_copy);
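/* A minimal sketch of the caller policy described above: a
 * hypothetical networking consumer that treats a short copy as a
 * runt frame and drops it.  example_copy_in() is an illustrative
 * name.
 */
#if 0	/* illustrative sketch only */
static int example_copy_in(struct ldc_channel *lp, void *buf,
			   unsigned int len,
			   struct ldc_trans_cookie *cookies, int ncookies)
{
	int copied = ldc_copy(lp, LDC_COPY_IN, buf, len, 0,
			      cookies, ncookies);

	if (copied < 0)
		return copied;		/* hard error */
	if (copied != len)
		return -EINVAL;		/* runt: drop the packet */
	return 0;
}
#endif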
2353 void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
2354 struct ldc_trans_cookie *cookies, int *ncookies,
2355 unsigned int map_perm)
2360 if (len & (8UL - 1))
2361 return ERR_PTR(-EINVAL);
2363 buf = kzalloc(len, GFP_ATOMIC);
2365 return ERR_PTR(-ENOMEM);
2367 err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
2370 return ERR_PTR(err);
2376 EXPORT_SYMBOL(ldc_alloc_exp_dring);
2378 void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
2379 struct ldc_trans_cookie *cookies, int ncookies)
2381 ldc_unmap(lp, cookies, ncookies);
2384 EXPORT_SYMBOL(ldc_free_exp_dring);
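/* A minimal sketch pairing the two exported descriptor-ring helpers
 * above; EXAMPLE_DRING_LEN and the cookie count are illustrative.
 * The length must be a multiple of 8, per the check in
 * ldc_alloc_exp_dring().
 */
#if 0	/* illustrative sketch only */
#define EXAMPLE_DRING_LEN	(64 * 8)

static void example_dring_roundtrip(struct ldc_channel *lp)
{
	struct ldc_trans_cookie cookies[2];
	int ncookies = ARRAY_SIZE(cookies);
	void *dring;

	dring = ldc_alloc_exp_dring(lp, EXAMPLE_DRING_LEN, cookies,
				    &ncookies,
				    LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
	if (IS_ERR(dring))
		return;

	/* ... hand the cookies to the peer and use the ring ... */

	ldc_free_exp_dring(lp, dring, EXAMPLE_DRING_LEN, cookies, ncookies);
}
#endif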
2386 static int __init ldc_init(void)
2388 unsigned long major, minor;
2389 struct mdesc_handle *hp;
2398 mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
2400 if (mp == MDESC_NODE_NULL)
2403 v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
2409 if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
2410 printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
2414 printk(KERN_INFO "%s", version);
2417 printk(KERN_INFO PFX "Domaining disabled.\n");
2420 ldom_domaining_enabled = 1;
2428 core_initcall(ldc_init);