5 #include <linux/scatterlist.h>
6 #include <linux/highmem.h>
7 #include <rdma/rdma_cm.h>
8 #include <linux/mutex.h>
14 * RDS Network protocol version
16 #define RDS_PROTOCOL_3_0 0x0300
17 #define RDS_PROTOCOL_3_1 0x0301
18 #define RDS_PROTOCOL_VERSION RDS_PROTOCOL_3_1
19 #define RDS_PROTOCOL_MAJOR(v) ((v) >> 8)
20 #define RDS_PROTOCOL_MINOR(v) ((v) & 255)
21 #define RDS_PROTOCOL(maj, min) (((maj) << 8) | min)
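/*
 * For example, RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1, and
 * RDS_PROTOCOL_MAJOR()/RDS_PROTOCOL_MINOR() recover 3 and 1 from it.
 * A rough, illustrative sketch (not part of this header) of clamping a
 * peer's advertised version to the highest version we speak:
 */
static inline unsigned int rds_example_negotiate_version(unsigned int peer)
{
	/* refuse to talk across a major version change */
	if (RDS_PROTOCOL_MAJOR(peer) != RDS_PROTOCOL_MAJOR(RDS_PROTOCOL_VERSION))
		return 0;
	/* otherwise fall back to the lower of the two minor versions */
	return peer < RDS_PROTOCOL_VERSION ? peer : RDS_PROTOCOL_VERSION;
}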
24 * XXX randomly chosen, but at least seems to be unused:
25 * # 18464-18768 Unassigned
26 * We should do better. We want a reserved port to discourage unpriv'ed
27 * userspace from listening.
29 #define RDS_PORT 18634
32 #define KERNEL_HAS_ATOMIC64
36 #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
38 /* sigh, pr_debug() causes unused variable warnings */
39 static inline __printf(1, 2)
40 void rdsdebug(char *fmt, ...)
45 /* XXX is there one of these somewhere? */
47 ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
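/*
 * The expression above is plain round-up division.  For example, with
 * y = 4096: x = 8192 gives (8192 + 4095) / 4096 == 2, x = 4097 still
 * gives 2, and only x <= 4096 gives 1.
 */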
49 #define RDS_FRAG_SHIFT 12
50 #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
52 #define RDS_CONG_MAP_BYTES (65536 / 8)
53 #define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
54 #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
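/*
 * The congestion map carries one bit per 16-bit port number: 65536 bits
 * == 8192 bytes, which (assuming 4 KiB pages) is RDS_CONG_MAP_PAGES == 2
 * pages of RDS_CONG_MAP_PAGE_BITS == 32768 bits each.  An illustrative
 * sketch (not part of this header) of locating a port's bit:
 */
static inline void rds_example_cong_locate(u16 port, unsigned long *page,
					   unsigned long *bit)
{
	*page = port / RDS_CONG_MAP_PAGE_BITS;	/* 0 or 1 with 4 KiB pages */
	*bit  = port % RDS_CONG_MAP_PAGE_BITS;	/* bit index within that page */
}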
57 struct rb_node m_rb_node;
59 wait_queue_head_t m_waitq;
60 struct list_head m_conn_list;
61 unsigned long m_page_addrs[RDS_CONG_MAP_PAGES];
66 * This is how we will track the connection state:
67 * A connection is always in one of the following
68 * states. Updates to the state are atomic and imply
74 RDS_CONN_DISCONNECTING,
79 /* Bits for c_flags */
80 #define RDS_LL_SEND_FULL 0
81 #define RDS_RECONNECT_PENDING 1
84 struct rds_connection {
85 struct hlist_node c_hash_node;
88 unsigned int c_loopback:1;
89 struct rds_connection *c_passive;
91 struct rds_cong_map *c_lcong;
92 struct rds_cong_map *c_fcong;
94 struct rds_message *c_xmit_rm;
95 unsigned long c_xmit_sg;
96 unsigned int c_xmit_hdr_off;
97 unsigned int c_xmit_data_off;
98 unsigned int c_xmit_atomic_sent;
99 unsigned int c_xmit_rdma_sent;
100 unsigned int c_xmit_data_sent;
102 spinlock_t c_lock; /* protect msg queues */
104 struct list_head c_send_queue;
105 struct list_head c_retrans;
109 struct rds_transport *c_trans;
110 void *c_transport_data;
113 unsigned long c_send_gen;
114 unsigned long c_flags;
115 unsigned long c_reconnect_jiffies;
116 struct delayed_work c_send_w;
117 struct delayed_work c_recv_w;
118 struct delayed_work c_conn_w;
119 struct work_struct c_down_w;
120 struct mutex c_cm_lock; /* protect conn state & cm */
121 wait_queue_head_t c_waitq;
123 struct list_head c_map_item;
124 unsigned long c_map_queued;
126 unsigned int c_unacked_packets;
127 unsigned int c_unacked_bytes;
129 /* Protocol version */
130 unsigned int c_version;
131 possible_net_t c_net;
135 struct net *rds_conn_net(struct rds_connection *conn)
137 return read_pnet(&conn->c_net);
141 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
143 write_pnet(&conn->c_net, net);
146 #define RDS_FLAG_CONG_BITMAP 0x01
147 #define RDS_FLAG_ACK_REQUIRED 0x02
148 #define RDS_FLAG_RETRANSMITTED 0x04
149 #define RDS_MAX_ADV_CREDIT 255
152 * Maximum space available for extension headers.
154 #define RDS_HEADER_EXT_SPACE 16
167 u8 h_exthdr[RDS_HEADER_EXT_SPACE];
171 * Reserved - indicates end of extensions
173 #define RDS_EXTHDR_NONE 0
176 * This extension header is included in the very
177 * first message that is sent on a new connection,
178 * and identifies the protocol level. This will help
179 * rolling updates if a future change requires breaking
181 * NB: This is no longer true for IB, where we do a version
182 * negotiation during the connection setup phase (protocol
183 * version information is included in the RDMA CM private data).
185 #define RDS_EXTHDR_VERSION 1
186 struct rds_ext_header_version {
191 * This extension header is included in the RDS message
192 * chasing an RDMA operation.
194 #define RDS_EXTHDR_RDMA 2
195 struct rds_ext_header_rdma {
200 * This extension header tells the peer about the
201 * destination <R_Key,offset> of the requested RDMA
204 #define RDS_EXTHDR_RDMA_DEST 3
205 struct rds_ext_header_rdma_dest {
207 __be32 h_rdma_offset;
210 #define __RDS_EXTHDR_MAX 16 /* for now */
212 struct rds_incoming {
214 struct list_head i_item;
215 struct rds_connection *i_conn;
216 struct rds_header i_hdr;
217 unsigned long i_rx_jiffies;
220 rds_rdma_cookie_t i_rdma_cookie;
224 struct rb_node r_rb_node;
228 /* A copy of the creation flags */
229 unsigned int r_use_once:1;
230 unsigned int r_invalidate:1;
231 unsigned int r_write:1;
233 /* This is for RDS_MR_DEAD.
234 * It would be nice & consistent to make this part of the above
235 * bit field here, but we need to use test_and_set_bit.
237 unsigned long r_state;
238 struct rds_sock *r_sock; /* back pointer to the socket that owns us */
239 struct rds_transport *r_trans;
240 void *r_trans_private;
243 /* Flags for mr->r_state */
244 #define RDS_MR_DEAD 0
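/*
 * Illustrative sketch (not part of this header): marking an MR dead
 * exactly once.  This is why r_state is a separate word that works with
 * test_and_set_bit() instead of being another r_* bit field above.
 */
static inline int rds_example_mr_mark_dead(struct rds_mr *mr)
{
	/* returns true only for the first caller to mark the MR dead */
	return !test_and_set_bit(RDS_MR_DEAD, &mr->r_state);
}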
246 static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
248 return r_key | (((u64) offset) << 32);
251 static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
256 static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
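/*
 * Example: rds_rdma_make_cookie(0x1234, 0x10) packs the 32-bit R_Key in
 * the low half and the byte offset in the high half, yielding the 64-bit
 * cookie 0x0000001000001234; rds_rdma_cookie_key() and
 * rds_rdma_cookie_offset() above recover 0x1234 and 0x10 from it.
 */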
261 /* atomic operation types */
262 #define RDS_ATOMIC_TYPE_CSWP 0
263 #define RDS_ATOMIC_TYPE_FADD 1
266 * m_sock_item and m_conn_item are on lists that are serialized under
267 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
268 * the message will not be put back on the retransmit list after being sent.
269 * Messages that are canceled while being sent rely on this.
271 * m_inc is used by loopback so that it can pass an incoming message straight
272 * back up into the rx path. It embeds a wire header which is also used by
273 * the send path, which is kind of awkward.
275 * m_sock_item indicates the message's presence on a socket's send or receive
276 * queue. m_rs will point to that socket.
278 * m_daddr is used by cancellation to prune messages to a given destination.
280 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
281 * nesting. As paths iterate over messages on a sock, or conn, they must
282 * also lock the conn, or sock, to remove the message from those lists too.
283 * Testing the flag to determine if the message is still on the lists lets
284 * us avoid testing the list_head directly. That means each path can use
285 * the message's list_head to keep it on a local list while juggling locks
286 * without confusing the other path.
288 * m_ack_seq is an optional field set by transports that need a different
289 * sequence number range to invalidate. They can use this in a callback
290 * that they pass to rds_send_drop_acked() to see if each message has been
291 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
292 * had ack_seq set yet.
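 *
 * For example, a path pulling a message off the connection's list
 * typically does something like this (illustrative only):
 *
 *	spin_lock_irqsave(&conn->c_lock, flags);
 *	if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 *		list_del_init(&rm->m_conn_item);
 *	spin_unlock_irqrestore(&conn->c_lock, flags);
 *
 * so that the other path, which may hold the message on a private list,
 * never sees a half-removed list_head.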
294 #define RDS_MSG_ON_SOCK 1
295 #define RDS_MSG_ON_CONN 2
296 #define RDS_MSG_HAS_ACK_SEQ 3
297 #define RDS_MSG_ACK_REQUIRED 4
298 #define RDS_MSG_RETRANSMITTED 5
299 #define RDS_MSG_MAPPED 6
300 #define RDS_MSG_PAGEVEC 7
304 struct list_head m_sock_item;
305 struct list_head m_conn_item;
306 struct rds_incoming m_inc;
309 unsigned long m_flags;
311 /* Never access m_rs without holding m_rs_lock.
316 spinlock_t m_rs_lock;
317 wait_queue_head_t m_flush_wait;
319 struct rds_sock *m_rs;
321 /* cookie to send to remote, in rds header */
322 rds_rdma_cookie_t m_rdma_cookie;
324 unsigned int m_used_sgs;
325 unsigned int m_total_sgs;
330 struct rm_atomic_op {
336 uint64_t compare_mask;
341 uint64_t nocarry_mask;
347 unsigned int op_notify:1;
348 unsigned int op_recverr:1;
349 unsigned int op_mapped:1;
350 unsigned int op_silent:1;
351 unsigned int op_active:1;
352 struct scatterlist *op_sg;
353 struct rds_notifier *op_notifier;
355 struct rds_mr *op_rdma_mr;
360 unsigned int op_write:1;
361 unsigned int op_fence:1;
362 unsigned int op_notify:1;
363 unsigned int op_recverr:1;
364 unsigned int op_mapped:1;
365 unsigned int op_silent:1;
366 unsigned int op_active:1;
367 unsigned int op_bytes;
368 unsigned int op_nents;
369 unsigned int op_count;
370 struct scatterlist *op_sg;
371 struct rds_notifier *op_notifier;
373 struct rds_mr *op_rdma_mr;
376 unsigned int op_active:1;
377 unsigned int op_nents;
378 unsigned int op_count;
379 unsigned int op_dmasg;
380 unsigned int op_dmaoff;
381 struct scatterlist *op_sg;
387 * The RDS notifier is used (optionally) to tell the application about
388 * completed RDMA operations. Rather than keeping the whole rds message
389 * around on the queue, we allocate a small notifier that is put on the
390 * socket's notifier_list. Notifications are delivered to the application
391 * through control messages.
393 struct rds_notifier {
394 struct list_head n_list;
395 uint64_t n_user_token;
400 * struct rds_transport - transport specific behavioural hooks
402 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
403 * part of a message. The caller serializes on the send_sem so this
404 * doesn't need to be reentrant for a given conn. The header must be
405 * sent before the data payload. .xmit must be prepared to send a
406 * message with no data payload. .xmit should return the number of
407 * bytes that were sent down the connection, including header bytes.
408 * Returning 0 tells the caller that it doesn't need to perform any
409 * additional work now. This is usually the case when the transport has
410 * filled the sending queue for its connection and will handle
411 * triggering the rds thread to continue the send when space becomes
412 * available. Returning -EAGAIN tells the caller to retry the send
413 * immediately. Returning -ENOMEM tells the caller to retry the send at
414 * some point in the future.
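 *
 * For example, a caller driving .xmit might react to its return value
 * like this (illustrative only, not the real rds_send_xmit() loop):
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret > 0)
 *		sent += ret;		// header + payload bytes went out
 *	else if (ret == 0)
 *		break;			// transport will restart the send itself
 *	else if (ret == -EAGAIN)
 *		continue;		// retry the send immediately
 *	else
 *		requeue = 1;		// e.g. -ENOMEM: back off, retry later
 *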
416 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
417 * it returns the connection can not call rds_recv_incoming().
418 * This will only be called once after conn_connect returns
419 * non-zero success. The caller serializes this with
420 * the send and connecting paths (xmit_* and conn_*). The
421 * transport is responsible for other serialization, including
422 * rds_recv_incoming(). This is called in process context but
423 * should try hard not to block.
426 struct rds_transport {
427 char t_name[TRANSNAMSIZ];
428 struct list_head t_item;
429 struct module *t_owner;
430 unsigned int t_prefer_loopback:1;
433 int (*laddr_check)(struct net *net, __be32 addr);
434 int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
435 void (*conn_free)(void *data);
436 int (*conn_connect)(struct rds_connection *conn);
437 void (*conn_shutdown)(struct rds_connection *conn);
438 void (*xmit_prepare)(struct rds_connection *conn);
439 void (*xmit_complete)(struct rds_connection *conn);
440 int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
441 unsigned int hdr_off, unsigned int sg, unsigned int off);
442 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
443 int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
444 int (*recv)(struct rds_connection *conn);
445 int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
446 void (*inc_free)(struct rds_incoming *inc);
448 int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
449 struct rdma_cm_event *event);
450 int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
451 void (*cm_connect_complete)(struct rds_connection *conn,
452 struct rdma_cm_event *event);
454 unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
457 void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
458 struct rds_sock *rs, u32 *key_ret);
459 void (*sync_mr)(void *trans_private, int direction);
460 void (*free_mr)(void *trans_private, int invalidate);
461 void (*flush_mrs)(void);
471 * bound_addr used for both incoming and outgoing, no INADDR_ANY
474 struct hlist_node rs_bound_node;
475 __be32 rs_bound_addr;
477 __be16 rs_bound_port;
479 struct rds_transport *rs_transport;
482 * rds_sendmsg caches the conn it used the last time around.
483 * This helps avoid costly lookups.
485 struct rds_connection *rs_conn;
487 /* flag indicating we were congested or not */
489 /* seen congestion (ENOBUFS) when sending? */
490 int rs_seen_congestion;
492 /* rs_lock protects all these adjacent members before the newline */
494 struct list_head rs_send_queue;
497 struct list_head rs_notify_queue; /* currently used for failed RDMAs */
499 /* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
500 * to decide whether the application should be woken up.
501 * If not set, we use rs_cong_track to find out whether a cong map
504 uint64_t rs_cong_mask;
505 uint64_t rs_cong_notify;
506 struct list_head rs_cong_list;
507 unsigned long rs_cong_track;
510 * rs_recv_lock protects the receive queue, and is
511 * used to serialize with rds_release.
513 rwlock_t rs_recv_lock;
514 struct list_head rs_recv_queue;
516 /* just for stats reporting */
517 struct list_head rs_item;
519 /* these have their own lock */
520 spinlock_t rs_rdma_lock;
521 struct rb_root rs_rdma_keys;
523 /* Socket options - in case there will be more */
524 unsigned char rs_recverr,
528 static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
530 return container_of(sk, struct rds_sock, rs_sk);
532 static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
538 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
539 * to account for overhead. We don't account for overhead; we simply
540 * count payload bytes against the specified value.
542 static inline int rds_sk_sndbuf(struct rds_sock *rs)
544 return rds_rs_to_sk(rs)->sk_sndbuf / 2;
546 static inline int rds_sk_rcvbuf(struct rds_sock *rs)
548 return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
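/*
 * Example: after setsockopt(SO_SNDBUF, 128 KiB) the core networking code
 * stores sk_sndbuf = 256 KiB, so rds_sk_sndbuf() above reports 128 KiB,
 * which RDS compares against queued payload bytes only.
 */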
551 struct rds_statistics {
552 uint64_t s_conn_reset;
553 uint64_t s_recv_drop_bad_checksum;
554 uint64_t s_recv_drop_old_seq;
555 uint64_t s_recv_drop_no_sock;
556 uint64_t s_recv_drop_dead_sock;
557 uint64_t s_recv_deliver_raced;
558 uint64_t s_recv_delivered;
559 uint64_t s_recv_queued;
560 uint64_t s_recv_immediate_retry;
561 uint64_t s_recv_delayed_retry;
562 uint64_t s_recv_ack_required;
563 uint64_t s_recv_rdma_bytes;
564 uint64_t s_recv_ping;
565 uint64_t s_send_queue_empty;
566 uint64_t s_send_queue_full;
567 uint64_t s_send_lock_contention;
568 uint64_t s_send_lock_queue_raced;
569 uint64_t s_send_immediate_retry;
570 uint64_t s_send_delayed_retry;
571 uint64_t s_send_drop_acked;
572 uint64_t s_send_ack_required;
573 uint64_t s_send_queued;
574 uint64_t s_send_rdma;
575 uint64_t s_send_rdma_bytes;
576 uint64_t s_send_pong;
577 uint64_t s_page_remainder_hit;
578 uint64_t s_page_remainder_miss;
579 uint64_t s_copy_to_user;
580 uint64_t s_copy_from_user;
581 uint64_t s_cong_update_queued;
582 uint64_t s_cong_update_received;
583 uint64_t s_cong_send_error;
584 uint64_t s_cong_send_blocked;
588 void rds_sock_addref(struct rds_sock *rs);
589 void rds_sock_put(struct rds_sock *rs);
590 void rds_wake_sk_sleep(struct rds_sock *rs);
591 static inline void __rds_wake_sk_sleep(struct sock *sk)
593 wait_queue_head_t *waitq = sk_sleep(sk);
595 if (!sock_flag(sk, SOCK_DEAD) && waitq)
598 extern wait_queue_head_t rds_poll_waitq;
602 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
603 void rds_remove_bound(struct rds_sock *rs);
604 struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
607 int rds_cong_get_maps(struct rds_connection *conn);
608 void rds_cong_add_conn(struct rds_connection *conn);
609 void rds_cong_remove_conn(struct rds_connection *conn);
610 void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
611 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
612 int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
613 void rds_cong_queue_updates(struct rds_cong_map *map);
614 void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
615 int rds_cong_updated_since(unsigned long *recent);
616 void rds_cong_add_socket(struct rds_sock *);
617 void rds_cong_remove_socket(struct rds_sock *);
618 void rds_cong_exit(void);
619 struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
622 int rds_conn_init(void);
623 void rds_conn_exit(void);
624 struct rds_connection *rds_conn_create(struct net *net,
625 __be32 laddr, __be32 faddr,
626 struct rds_transport *trans, gfp_t gfp);
627 struct rds_connection *rds_conn_create_outgoing(struct net *net,
628 __be32 laddr, __be32 faddr,
629 struct rds_transport *trans, gfp_t gfp);
630 void rds_conn_shutdown(struct rds_connection *conn);
631 void rds_conn_destroy(struct rds_connection *conn);
632 void rds_conn_drop(struct rds_connection *conn);
633 void rds_conn_connect_if_down(struct rds_connection *conn);
634 void rds_for_each_conn_info(struct socket *sock, unsigned int len,
635 struct rds_info_iterator *iter,
636 struct rds_info_lengths *lens,
637 int (*visitor)(struct rds_connection *, void *),
640 void __rds_conn_error(struct rds_connection *conn, const char *, ...);
641 #define rds_conn_error(conn, fmt...) \
642 __rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
645 rds_conn_transition(struct rds_connection *conn, int old, int new)
647 return atomic_cmpxchg(&conn->c_state, old, new) == old;
651 rds_conn_state(struct rds_connection *conn)
653 return atomic_read(&conn->c_state);
657 rds_conn_up(struct rds_connection *conn)
659 return atomic_read(&conn->c_state) == RDS_CONN_UP;
663 rds_conn_connecting(struct rds_connection *conn)
665 return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING;
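/*
 * Illustrative sketch (not part of this header): a transport's connect
 * completion path can rely on the atomic compare-and-swap in
 * rds_conn_transition() so that only one racing path wins the
 * CONNECTING -> UP transition:
 */
static inline int rds_example_connect_done(struct rds_connection *conn)
{
	/* returns non-zero only for the path that actually flipped the state */
	return rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP);
}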
669 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
670 struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
671 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
672 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
673 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
674 __be16 dport, u64 seq);
675 int rds_message_add_extension(struct rds_header *hdr,
676 unsigned int type, const void *data, unsigned int len);
677 int rds_message_next_extension(struct rds_header *hdr,
678 unsigned int *pos, void *buf, unsigned int *buflen);
679 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
680 int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
681 void rds_message_inc_free(struct rds_incoming *inc);
682 void rds_message_addref(struct rds_message *rm);
683 void rds_message_put(struct rds_message *rm);
684 void rds_message_wait(struct rds_message *rm);
685 void rds_message_unmapped(struct rds_message *rm);
687 static inline void rds_message_make_checksum(struct rds_header *hdr)
690 hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
693 static inline int rds_message_verify_checksum(const struct rds_header *hdr)
695 return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
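/*
 * Illustrative sketch (not part of this header): the send path fills in
 * the header and checksums it last, so that the receive path can reject
 * anything that fails rds_message_verify_checksum():
 */
static inline void rds_example_stamp_header(struct rds_header *hdr,
					    __be16 sport, __be16 dport, u64 seq)
{
	rds_message_populate_header(hdr, sport, dport, seq);
	rds_message_make_checksum(hdr);	/* compute after every field is set */
}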
700 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
702 int rds_page_copy_user(struct page *page, unsigned long offset,
703 void __user *ptr, unsigned long bytes,
705 #define rds_page_copy_to_user(page, offset, ptr, bytes) \
706 rds_page_copy_user(page, offset, ptr, bytes, 1)
707 #define rds_page_copy_from_user(page, offset, ptr, bytes) \
708 rds_page_copy_user(page, offset, ptr, bytes, 0)
709 void rds_page_exit(void);
712 void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
714 void rds_inc_put(struct rds_incoming *inc);
715 void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
716 struct rds_incoming *inc, gfp_t gfp);
717 int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
719 void rds_clear_recv_queue(struct rds_sock *rs);
720 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
721 void rds_inc_info_copy(struct rds_incoming *inc,
722 struct rds_info_iterator *iter,
723 __be32 saddr, __be32 daddr, int flip);
726 int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
727 void rds_send_reset(struct rds_connection *conn);
728 int rds_send_xmit(struct rds_connection *conn);
730 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
731 typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
732 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
733 is_acked_func is_acked);
734 int rds_send_pong(struct rds_connection *conn, __be16 dport);
735 struct rds_message *rds_send_get_message(struct rds_connection *,
736 struct rm_rdma_op *);
739 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
740 int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
741 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
742 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
743 void rds_rdma_drop_keys(struct rds_sock *rs);
744 int rds_rdma_extra_size(struct rds_rdma_args *args);
745 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
746 struct cmsghdr *cmsg);
747 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
748 struct cmsghdr *cmsg);
751 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
752 struct cmsghdr *cmsg);
753 void rds_rdma_free_op(struct rm_rdma_op *ro);
754 void rds_atomic_free_op(struct rm_atomic_op *ao);
755 void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
756 void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
757 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
758 struct cmsghdr *cmsg);
760 void __rds_put_mr_final(struct rds_mr *mr);
761 static inline void rds_mr_put(struct rds_mr *mr)
763 if (atomic_dec_and_test(&mr->r_refcount))
764 __rds_put_mr_final(mr);
768 DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
769 #define rds_stats_inc_which(which, member) do { \
770 per_cpu(which, get_cpu()).member++; \
773 #define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
774 #define rds_stats_add_which(which, member, count) do { \
775 per_cpu(which, get_cpu()).member += count; \
778 #define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
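/*
 * For example (illustrative only), the send path bumps counters with the
 * wrappers above; a transport with its own per-cpu statistics struct
 * would use the _which variants directly:
 */
static inline void rds_example_count_queued(unsigned int copied)
{
	rds_stats_inc(s_send_queued);		/* one more message queued */
	rds_stats_add(s_copy_from_user, copied);/* payload bytes copied in */
}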
779 int rds_stats_init(void);
780 void rds_stats_exit(void);
781 void rds_stats_info_copy(struct rds_info_iterator *iter,
782 uint64_t *values, const char *const *names,
786 int rds_sysctl_init(void);
787 void rds_sysctl_exit(void);
788 extern unsigned long rds_sysctl_sndbuf_min;
789 extern unsigned long rds_sysctl_sndbuf_default;
790 extern unsigned long rds_sysctl_sndbuf_max;
791 extern unsigned long rds_sysctl_reconnect_min_jiffies;
792 extern unsigned long rds_sysctl_reconnect_max_jiffies;
793 extern unsigned int rds_sysctl_max_unacked_packets;
794 extern unsigned int rds_sysctl_max_unacked_bytes;
795 extern unsigned int rds_sysctl_ping_enable;
796 extern unsigned long rds_sysctl_trace_flags;
797 extern unsigned int rds_sysctl_trace_level;
800 int rds_threads_init(void);
801 void rds_threads_exit(void);
802 extern struct workqueue_struct *rds_wq;
803 void rds_queue_reconnect(struct rds_connection *conn);
804 void rds_connect_worker(struct work_struct *);
805 void rds_shutdown_worker(struct work_struct *);
806 void rds_send_worker(struct work_struct *);
807 void rds_recv_worker(struct work_struct *);
808 void rds_connect_complete(struct rds_connection *conn);
811 int rds_trans_register(struct rds_transport *trans);
812 void rds_trans_unregister(struct rds_transport *trans);
813 struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
814 void rds_trans_put(struct rds_transport *trans);
815 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
817 struct rds_transport *rds_trans_get(int t_type);
818 int rds_trans_init(void);
819 void rds_trans_exit(void);