/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the soft lockup
 * watchdog will trigger.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");
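
/* Since the parameter is read-only (0444), it can only be set at module
 * load time; a hypothetical tuning example would be
 *
 *	modprobe rds send_batch_count=128
 *
 * with the current value then visible under
 * /sys/module/rds/parameters/send_batch_count.
 */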
/*
 * Reset the send state. This takes c_send_lock itself, so it must not be
 * called while a sender already holds that lock.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_send_lock, flags);
	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		spin_unlock_irqrestore(&conn->c_send_lock, flags);

		rds_message_put(rm);
	} else {
		spin_unlock_irqrestore(&conn->c_send_lock, flags);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	int gen = 0;
	LIST_HEAD(to_be_dropped);

restart:
	if (!rds_conn_up(conn))
		goto out;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!spin_trylock_irqsave(&conn->c_send_lock, flags)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}
	atomic_inc(&conn->c_senders);

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	gen = atomic_inc_return(&conn->c_send_generation);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock(&conn->c_lock);
			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock(&conn->c_lock);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock(&conn->c_lock);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock(&conn->c_lock);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}
		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rds_message_addref(rm);
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				rds_message_put(rm);
				break;
			}
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rds_message_addref(rm);
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				rds_message_put(rm);
				break;
			}
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}
		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}
		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op. Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock. If we check
	 * for queued messages after dropping the lock then either we'll
	 * see the queued message or the queuer will get the lock. If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty. It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	spin_unlock_irqrestore(&conn->c_send_lock, flags);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	atomic_dec(&conn->c_senders);

	/*
	 * Other senders will see we have c_send_lock and exit. We
	 * need to recheck the send queue and race again for c_send_lock
	 * to make sure messages don't just sit on the send queue, if
	 * somebody hasn't already beat us into the loop.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 */
	if (ret == 0) {
		smp_mb();
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (gen == atomic_read(&conn->c_send_generation))
				goto restart;
		}
	}
out:
	return ret;
}
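
/* When rds_send_xmit() stops with ret != 0, the transport is responsible
 * for restarting it once resources free up. A typical pattern (a sketch,
 * not any specific transport's verbatim code) is for the tx-completion
 * path to kick the connection's send worker, which retries
 * rds_send_xmit() from process context:
 *
 *	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 */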
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
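
/* With the default comparison above, an ack of N covers every message
 * whose header sequence is <= N: for example, ack 7 retires messages with
 * h_sequence 5, 6 and 7, while a message with h_sequence 8 stays on the
 * retransmit queue.
 */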
/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number greater than or equal to the given sequence
 * number.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
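
/* The expected caller is a transport's send-completion handler: once the
 * work request for the RDMA op completes (successfully or not), it calls
 * rds_rdma_send_complete() with an RDS_RDMA_* status. The queued notifier
 * is later handed to the application as an RDS_CMSG_RDMA_STATUS control
 * message on its next recvmsg().
 */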
/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}
/*
 * This is called from the IB send completion when we detect
 * a RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction. Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
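
/* As a sketch of a typical caller (details vary by transport): when the
 * TCP transport learns how far the peer's stack has consumed the stream,
 * it maps that back to an RDS sequence and passes its own is_acked
 * callback here, along the lines of
 *
 *	rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 */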
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
/*
 * We only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
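
/* Worked example of the "old value" check above (numbers illustrative):
 * with rds_sk_sndbuf() returning 64KB and rs_snd_bytes at 60KB, an 8KB
 * message is still queued (60KB < 64KB), raising rs_snd_bytes to 68KB.
 * The next send fails the check and blocks in rds_sendmsg(), but poll()
 * also stops reporting write space, so userspace can't spin on -EAGAIN.
 */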
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;
			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}
	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
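
/* For example (assuming 4KB pages): a 9000-byte payload needs
 * ceil(9000, PAGE_SIZE) = 3 data scatterlist entries, and an attached
 * RDS_CMSG_ATOMIC_FADD adds one more, so rds_message_alloc() below is
 * asked for 4 * sizeof(struct scatterlist) of extra space.
 */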
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
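
/* A userspace sketch of how one of these control messages is attached to
 * a send (the rds_rdma_args field setup is elided; this is illustrative,
 * not authoritative - see rds-tools for real usage):
 *
 *	char ctl[CMSG_SPACE(sizeof(struct rds_rdma_args))];
 *	struct msghdr mh = { .msg_control = ctl,
 *			     .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *	c->cmsg_level = SOL_RDS;
 *	c->cmsg_type = RDS_CMSG_RDMA_ARGS;
 *	c->cmsg_len = CMSG_LEN(sizeof(struct rds_rdma_args));
 *	... fill in (struct rds_rdma_args *)CMSG_DATA(c) ...
 *	sendmsg(fd, &mh, 0);
 */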
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's handling of unknown flags, for BSD error
	 * message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send. We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}