/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <rdma/ib_pack.h>

#include "ipath_layer.h"
#include "verbs_debug.h"
#define QPN_MAX			(1 << 24)
#define QPNMAP_ENTRIES		(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION	1
/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

#define IPATH_POST_SEND_OK	0x01
#define IPATH_POST_RECV_OK	0x02
#define IPATH_PROCESS_RECV_OK	0x04
#define IPATH_PROCESS_SEND_OK	0x08
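
/*
 * Illustrative sketch (assumed usage, not a declaration): these flags
 * are meant to be tested against the per-state entries of the
 * ib_ipath_state_ops[] table declared at the end of this header,
 * e.g. when deciding whether a post_send is legal in the current
 * QP state:
 *
 *	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))
 *		return -EINVAL;
 */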
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)
struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be64 vaddr;
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be64 atomic_ack_eth;
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));
/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));
/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
};

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ipath_mregion mr;	/* must be last */
};

/* Fast memory region */
struct ipath_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct ipath_mregion mr;	/* must be last */
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};
/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct ipath_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the send cq locked first.  No other
 * nesting should be done.
 *
 * Each struct ipath_cq/qp also has an atomic_t ref count.  The
 * pointer from the cq/qp_table to the struct counts as one reference.
 * This reference also is good for access through the consumer API, so
 * modifying the CQ/QP etc doesn't need to take another reference.
 * Access because of a completion being polled does need a reference.
 *
 * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
 * - decrement ref count
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when the
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 */
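
/*
 * Illustrative sketch (not a declaration in this header): the
 * completion-event access pattern above, assuming ipath_lookup_qpn()
 * declared below takes the table lock and returns the QP with the
 * ref count already incremented:
 *
 *	qp = ipath_lookup_qpn(&dev->qp_table, qpn);
 *	if (qp) {
 *		spin_lock(&qp->s_lock);
 *		... handle the completion ...
 *		spin_unlock(&qp->s_lock);
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */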
struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	u32 head;		/* new records added to the head */
	u32 tail;		/* poll_cq() reads from here. */
	struct ib_wc *queue;	/* this is actually ibcq.cqe + 1 */
};
/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->r_rq.max_sge.
 */
struct ipath_rwqe {
	u64 wr_id;
	u32 length;		/* total length of data in sg_list */
	u8 num_sge;
	struct ipath_sge sg_list[0];
};
struct ipath_rq {
	spinlock_t lock;
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	struct ipath_rwqe *wq;	/* RWQE array */
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};
/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order which only happens in modify_qp() or changing the QP 'state'.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* link list for QPN hash table */
	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
	struct list_head piowait;	/* link for wait PIO buf */
	struct list_head timerwait;	/* link for waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct tasklet_struct s_task;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_sge_state s_sge;	/* current send request data */
	/* current RDMA read send data */
	struct ipath_sge_state s_rdma_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	unsigned long s_flags;
	u32 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_len;		/* total length of s_rdma_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 s_ack_psn;		/* PSN for next ACK or RDMA_READ */
	u64 s_ack_atomic;	/* data for atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	u64 r_atomic_data;	/* data for last atomic op */
	u32 r_atomic_psn;	/* PSN of last atomic op */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_reuse_sge;		/* for UC receive errors */
	u8 r_sge_inx;		/* current index into sg_list */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 qp_access_flags;
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_min_rnr_timer;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	enum ib_mtu path_mtu;
	atomic_t msn;		/* message sequence number */
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_rq r_rq;		/* receive work queue */
	struct ipath_sge r_sg_list[0];	/* verified SGEs */
};
/*
 * Bit definitions for s_flags.
 */
#define IPATH_S_BUSY		0
#define IPATH_S_SIGNAL_REQ_WR	1
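
/*
 * Illustrative sketch (assumed usage, not a declaration): these are
 * bit numbers within the unsigned long qp->s_flags, intended for the
 * atomic bitops, e.g. to let only one sender run for a QP at a time:
 *
 *	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
 *		return;
 *	... build and send packets ...
 *	clear_bit(IPATH_S_BUSY, &qp->s_flags);
 */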
/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}
/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ipath_sge)) * n);
}
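
/*
 * Illustrative sketch (hypothetical loop, not a declaration): because
 * the WQEs are variable sized, walking the send queue has to go
 * through get_swqe_ptr() instead of indexing qp->s_wq directly:
 *
 *	u32 i = qp->s_tail;
 *
 *	while (i != qp->s_head) {
 *		struct ipath_swqe *wqe = get_swqe_ptr(qp, i);
 *
 *		... process wqe ...
 *		if (++i == qp->s_size)
 *			i = 0;
 *	}
 */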
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};
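
/*
 * Illustrative sketch (assumed allocation path, not a declaration):
 * a map page is allocated on first use, roughly:
 *
 *	if (unlikely(!map->page)) {
 *		unsigned long page = get_zeroed_page(GFP_KERNEL);
 *
 *		... install (void *) page under a lock, freeing it if
 *		    another CPU won the race; map->page then stays set
 *		    for the life of the driver ...
 *	}
 */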
struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};
struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};
struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};
struct ipath_ibdev {
	struct ib_device ibdev;
	struct list_head dev_list;
	struct ipath_devdata *dd;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot_resv_lmc;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list for wait PIO buf */
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_pkey_violations;			/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_timeouts;
	u32 n_pkt_drops;
	u32 n_wqe_errs;
	u32 n_rdma_dup_busy;
	u32 n_piowait;
	u32 n_no_piobuf;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 link_width_enabled;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};
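
/*
 * Illustrative note (assumed usage): the z_* fields above hold the
 * counter values captured when the PMA last zeroed the counters, so
 * a PortCounters query would report deltas such as:
 *
 *	port_xmit_data = <current xmit dword count> - dev->z_port_xmit_data;
 */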
struct ipath_ucontext {
	struct ib_ucontext ibucontext;
};
static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct ipath_fmr, ibfmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}
int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad);

static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
						  *ibucontext)
{
	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
}
/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
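
/*
 * Illustrative examples (not declarations): the << 8 shifts out any
 * difference above the low 24 bits, so only the PSN bits determine
 * the sign and the comparison wraps correctly at the 24-bit boundary:
 *
 *	ipath_cmp24(5, 3) > 0
 *	ipath_cmp24(0x000001, 0xFFFFFE) > 0	(1 is "after" 0xFFFFFE)
 */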
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);

struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

void ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

void ipath_do_rc_send(unsigned long data);

void ipath_do_uc_send(unsigned long data);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
				int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);

void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];

extern const u8 ipath_cvt_physportstate[];

extern const int ib_ipath_state_ops[];

extern unsigned int ib_ipath_lkey_table_size;

extern const u32 ib_ipath_rnr_table[];
#endif	/* IPATH_VERBS_H */