/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"
static void rds_ib_send_rdma_complete(struct rds_message *rm,
				      int wc_status)
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;
	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;
	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;
	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	rds_rdma_send_complete(rm, notify_status);
}
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rds_rdma_op *op)
{
	if (op->r_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->r_sg, op->r_nents,
				op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->r_mapped = 0;
	}
}
static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->m_sg, rm->m_nents,
			DMA_TO_DEVICE);

	if (rm->m_rdma_op != NULL) {
		rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1. Notify when we received the ACK on the RDS message
		 *     that was queued with the RDMA. This provides reliable
		 *     notification of RDMA status at the expense of a one-way
		 *     packet delay.
		 *  2. Notify when the IB stack gives us the completion event for
		 *     the RDMA operation.
		 *  3. Notify when the IB stack gives us the completion event for
		 *     the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of syncing.
		 */
		rds_ib_send_rdma_complete(rm, wc_status);

		if (rm->m_rdma_op->r_write)
			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now. */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_wr.opcode == 0xdead)
			continue;
		if (send->s_rm)
			rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
		if (send->s_op)
			rds_ib_send_unmap_rdma(ic, send->s_op);
	}
}
/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
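
/*
 * Rough sketch of the completion bookkeeping below (a reading aid, not a
 * literal trace): each send WR is posted with wr_id set to its ring index
 * (see rds_ib_send_init_ring). When a signaled completion for index N
 * arrives, every unsignaled entry between the ring's oldest outstanding
 * index and N has also completed, since WRs complete in order on a
 * reliable-connected QP. rds_ib_ring_completed() is assumed to return that
 * count, the loop below retires each of those entries, and the ring slots
 * are then released in a single rds_ib_ring_free() call.
 */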
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with a flush error (ERR_WFLUSH), and
			 * the application will never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					if (rm->m_rdma_op)
						rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
					rds_ib_send_rdma_complete(rm, wc.status);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}
274 * This is the main function for allocating credits when sending
277 * Conceptually, we have two counters:
278 * - send credits: this tells us how many WRs we're allowed
279 * to submit without overruning the reciever's queue. For
280 * each SEND WR we post, we decrement this by one.
282 * - posted credits: this tells us how many WRs we recently
283 * posted to the receive queue. This value is transferred
284 * to the peer as a "credit update" in a RDS header field.
285 * Every time we transmit credits to the peer, we subtract
286 * the amount of transferred credits from this counter.
288 * It is essential that we avoid situations where both sides have
289 * exhausted their send credits, and are unable to send new credits
290 * to the peer. We achieve this by requiring that we send at least
291 * one credit update to the peer before exhausting our credits.
292 * When new credits arrive, we subtract one credit that is withheld
293 * until we've posted new buffers and are ready to transmit these
294 * credits (see rds_ib_send_add_credits below).
296 * The RDS send code is essentially single-threaded; rds_send_xmit
297 * grabs c_send_lock to ensure exclusive access to the send ring.
298 * However, the ACK sending code is independent and can race with
301 * In the send path, we need to update the counters for send credits
302 * and the counter of posted buffers atomically - when we use the
303 * last available credit, we cannot allow another thread to race us
304 * and grab the posted credits counter. Hence, we have to use a
305 * spinlock to protect the credit counter, or use atomics.
307 * Spinlocks shared between the send and the receive path are bad,
308 * because they create unnecessary delays. An early implementation
309 * using a spinlock showed a 5% degradation in throughput at some
312 * This implementation avoids spinlocks completely, putting both
313 * counters into a single atomic, and updating that atomic using
314 * atomic_add (in the receive path, when receiving fresh credits),
315 * and using atomic_cmpxchg when updating the two counters.
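
/*
 * Illustrative sketch only: the real IB_GET_xxx/IB_SET_xxx macros are
 * defined in ib.h and their exact layout is not reproduced here. The idea
 * is that a single atomic_t packs both counters, roughly along the lines
 * of send credits in the low 16 bits and posted credits in the high bits:
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *
 * With a layout like that, atomic_add(IB_SET_SEND_CREDITS(n), &ic->i_credits)
 * adds n send credits without disturbing the posted count, and the cmpxchg
 * loop below can debit both fields in one shot.
 */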
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * count advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
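
/*
 * Back-of-the-envelope illustration of the "16" compromise in
 * rds_ib_advertise_credits() above (assumed numbers, not measurements):
 * refilling a receive ring of, say, 1024 buffers triggers at most
 * 1024/16 = 64 credit updates instead of 1024, while the peer is never
 * left more than 15 posted-but-unadvertised buffers behind.
 */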
static inline void
rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
		struct rds_ib_send_work *send, unsigned int pos,
		unsigned long buffer, unsigned int length,
		int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		sge = rds_ib_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE. */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;
}
/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
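
/*
 * Worked example of the fragmentation below (assuming the usual
 * RDS_FRAG_SIZE of 4096 bytes; the real value comes from rds.h): a
 * message with h_len == 10000 needs ceil(10000, 4096) == 3 send work
 * requests, each carrying one data SGE of at most one fragment plus the
 * header SGE, so rds_ib_ring_alloc() is asked for 3 ring entries.
 */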
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}
	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* map the message the first time we see it */
	if (ic->i_rm == NULL) {
		/*
		printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
				rm->m_inc.i_hdr.h_flags,
				be32_to_cpu(rm->m_inc.i_hdr.h_len));
		*/
		if (rm->m_nents) {
			rm->m_count = ib_dma_map_sg(dev,
					rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
			if (rm->m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;
		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->m_rdma_op) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
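		/* The cap of 255 presumably reflects the wire format: the
		 * credit update travels in a single-byte field of the RDS
		 * header (h_credit, filled in below), so at most 255 credits
		 * can be advertised in one message. */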
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->m_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages. So instead we always
	 * use a second sge and our long-lived ring of mapped headers. We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}
	/* if there's data, reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_ib_xmit_populate_wr(ic, send, pos,
				ib_sg_dma_address(dev, scat) + off, len,
				send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}
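
		/*
		 * Sketch of why the periodic IB_SEND_SIGNALED matters: an
		 * unsignaled WR does not normally generate its own completion
		 * entry; its ring slot is only reclaimed when a later signaled
		 * WR completes and rds_ib_ring_completed() counts everything
		 * up to it. Signaling every few WRs therefore bounds how long
		 * send ring entries can sit unreclaimed.
		 */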
		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}
add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (0) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(hdr->h_dport),
				hdr->h_flags,
				be32_to_cpu(hdr->h_len));
		}
		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}
	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->m_sg[rm->m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned int len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents, (op->r_write) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}
	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);
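
	/*
	 * Example with made-up numbers: if the mapped scatterlist has
	 * op->r_count == 100 entries and the device reports max_sge == 30,
	 * ceil(100, 30) == 4 work requests are needed; the first three carry
	 * 30 SGEs each and the last one carries the remaining 10.
	 */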
	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;
	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		prev->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}
void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}