drivers/infiniband/hw/ipath/ipath_stats.c
/*
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ipath_kernel.h"

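/*
 * Driver-wide statistics; a single instance of this structure covers
 * all InfiniPath units handled by this driver.
 */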
struct infinipath_stats ipath_stats;

/**
 * ipath_snap_cntr - snapshot a chip counter
 * @dd: the infinipath device
 * @creg: the counter to snapshot
 *
 * Called from the stats timer and from user counter reads, to deal with
 * counters that wrap in "human time".  The words sent and received, and
 * the packets sent and received are all that we worry about.  For now,
 * at least, we don't worry about error counters, because if they wrap
 * that quickly we probably don't care.  We may eventually just make this
 * handle all the counters.  Word counters can wrap in about 20 seconds
 * of full-bandwidth traffic, packet counters in a few hours.
 */
u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
{
        u32 val, reg64 = 0;
        u64 val64;
        unsigned long t0, t1;
        u64 ret;

        t0 = jiffies;
        /*
         * The fast-incrementing word and packet counters can be read as
         * full 64-bit values on chips without IPATH_32BITCOUNTERS;
         * otherwise we snapshot the 32-bit registers and maintain 64-bit
         * values in the driver below.
         */
        if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
            (creg == dd->ipath_cregs->cr_wordsendcnt ||
             creg == dd->ipath_cregs->cr_wordrcvcnt ||
             creg == dd->ipath_cregs->cr_pktsendcnt ||
             creg == dd->ipath_cregs->cr_pktrcvcnt)) {
                val64 = ipath_read_creg(dd, creg);
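                /*
                 * A read of all ones usually means the chip read failed;
                 * flag it in val so the timeout check below also fires
                 * for the 64-bit path.
                 */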
                val = val64 == ~0ULL ? ~0U : 0;
                reg64 = 1;
        } else                  /* val64 just to keep gcc quiet... */
                val64 = val = ipath_read_creg32(dd, creg);
        /*
         * See if a second has passed.  This is just a way to detect things
         * that are quite broken.  Normally this should take just a few
         * cycles (the check is for long enough that we don't care if we get
         * pre-empted).  An Opteron HT I/O read timeout is 4 seconds with
         * normal NB values.
         */
        t1 = jiffies;
        if (time_before(t0 + HZ, t1) && val == -1) {
                ipath_dev_err(dd, "Error!  Read counter 0x%x timed out\n",
                              creg);
                ret = 0ULL;
                goto bail;
        }
        if (reg64) {
                ret = val64;
                goto bail;
        }

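        /*
         * For the 32-bit counters, accumulate the delta since the last
         * snapshot into a 64-bit shadow value.  The unsigned 32-bit
         * subtraction yields the correct delta even when the hardware
         * counter has wrapped once since the previous read.
         */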
        if (creg == dd->ipath_cregs->cr_wordsendcnt) {
                if (val != dd->ipath_lastsword) {
                        dd->ipath_sword += val - dd->ipath_lastsword;
                        dd->ipath_lastsword = val;
                }
                val64 = dd->ipath_sword;
        } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
                if (val != dd->ipath_lastrword) {
                        dd->ipath_rword += val - dd->ipath_lastrword;
                        dd->ipath_lastrword = val;
                }
                val64 = dd->ipath_rword;
        } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
                if (val != dd->ipath_lastspkts) {
                        dd->ipath_spkts += val - dd->ipath_lastspkts;
                        dd->ipath_lastspkts = val;
                }
                val64 = dd->ipath_spkts;
        } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
                if (val != dd->ipath_lastrpkts) {
                        dd->ipath_rpkts += val - dd->ipath_lastrpkts;
                        dd->ipath_lastrpkts = val;
                }
                val64 = dd->ipath_rpkts;
        } else
                val64 = (u64) val;

        ret = val64;

bail:
        return ret;
}

/**
 * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
 * @dd: the infinipath device
 *
 * Print the delta of egrfull/hdrqfull errors for kernel ports no more than
 * every 5 seconds.  User process counts are printed at close, but the
 * kernel never closes, so we do it here.  Separate routine so it may be
 * called from other places someday, and so the function name is
 * meaningful when printed by _IPATH_INFO.
 */
static void ipath_qcheck(struct ipath_devdata *dd)
{
        static u64 last_tot_hdrqfull;
        size_t blen = 0;
        char buf[128];

        *buf = 0;
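        /*
         * Accumulate the per-interval deltas into buf so they can be
         * emitted in a single ipath_dbg() line below.
         */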
        if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
                blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
                                dd->ipath_pd[0]->port_hdrqfull -
                                dd->ipath_p0_hdrqfull);
                dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
        }
        if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
                blen += snprintf(buf + blen, sizeof buf - blen,
                                 "%srcvegrfull %llu",
                                 blen ? ", " : "",
                                 (unsigned long long)
                                 (ipath_stats.sps_etidfull -
                                  dd->ipath_last_tidfull));
                dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
        }

        /*
         * This is actually the number of hdrq full interrupts, not actual
         * events, but at the moment that's mostly what we're interested
         * in.  The actual count, etc. is in the counters, if needed.  For
         * production users this won't ordinarily be printed.
         */

        if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
            ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
                blen += snprintf(buf + blen, sizeof buf - blen,
                                 "%shdrqfull %llu (all ports)",
                                 blen ? ", " : "",
                                 (unsigned long long)
                                 (ipath_stats.sps_hdrqfull -
                                  last_tot_hdrqfull));
                last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
        }
        if (blen)
                ipath_dbg("%s\n", buf);

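        /*
         * If the hardware has advanced the port 0 header queue tail past
         * our software head, but the port 0 packet count has not moved
         * since the last poll, we may have missed a receive interrupt;
         * call ipath_kreceive() from here to drain the queue.
         */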
        if (dd->ipath_port0head != (u32)
            le64_to_cpu(*dd->ipath_hdrqtailptr)) {
                if (dd->ipath_lastport0rcv_cnt ==
                    ipath_stats.sps_port0pkts) {
                        ipath_cdbg(PKT, "missing rcv interrupts? "
                                   "port0 tl=%llx hd=%x; port0pkts %llx\n",
                                   (unsigned long long)
                                   le64_to_cpu(*dd->ipath_hdrqtailptr),
                                   dd->ipath_port0head,
                                   (unsigned long long)
                                   ipath_stats.sps_port0pkts);
                        ipath_kreceive(dd);
                }
                dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
        }
}

/**
 * ipath_get_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the infinipath device ipath_devdata
 *
 * Called from the stats timer.
 */
void ipath_get_faststats(unsigned long opaque)
{
        struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
        u32 val;
        static unsigned cnt;

        /*
         * Don't access the chip while running diags, or memory diags can
         * fail.
         */
        if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT) ||
            ipath_diag_inuse)
                /* but re-arm the timer, for the diags case; won't hurt otherwise */
                goto done;

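        /*
         * On chips with only 32-bit counters, snapshot the fast-moving
         * word and packet counters on every poll so ipath_snap_cntr()
         * folds any wrap into its 64-bit shadow values before a second
         * wrap can occur.
         */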
        if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
        }

        ipath_qcheck(dd);

        /*
         * Deal with repeat error suppression.  It doesn't really matter if
         * the last error was almost a full interval ago, or just a few
         * usecs ago; we still won't get more than 2 per interval.  We may
         * want longer intervals for this eventually; it could be done with
         * a mod, a counter or a separate timer.  Also see the code in
         * ipath_handle_errors() and ipath_handle_hwerrors().
         */

        if (dd->ipath_lasterror)
                dd->ipath_lasterror = 0;
        if (dd->ipath_lasthwerror)
                dd->ipath_lasthwerror = 0;
        if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
            && time_after(jiffies, dd->ipath_unmasktime)) {
                char ebuf[256];
                ipath_decode_err(ebuf, sizeof ebuf,
                                 (dd->ipath_maskederrs &
                                  ~dd->ipath_ignorederrs));
                if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
                    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
                        ipath_dev_err(dd, "Re-enabling masked errors "
                                      "(%s)\n", ebuf);
                else {
                        /*
                         * rcvegrfull and rcvhdrqfull are "normal" for some
                         * types of processes (mostly benchmarks) that send
                         * huge numbers of messages while not processing
                         * them, so only complain about these at debug
                         * level.
                         */
                        ipath_dbg("Disabling frequent queue full errors "
                                  "(%s)\n", ebuf);
                }
                dd->ipath_maskederrs = dd->ipath_ignorederrs;
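                /*
                 * Bits set in kr_errormask enable reporting of those
                 * errors, so writing the complement of ipath_maskederrs
                 * re-enables everything except the errors we still want
                 * ignored.
                 */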
                ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
                                 ~dd->ipath_maskederrs);
        }

        /* limit qfull messages to ~one per minute per port */
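        /*
         * Resetting the saved egr head and rcvhdrq tail values to -1
         * re-arms the queue-full reporting in the interrupt error path,
         * so a port whose queues stay full is reported again rather
         * than being silenced indefinitely.
         */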
        if ((++cnt & 0x10)) {
                for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
                     val--) {
                        if (dd->ipath_lastegrheads[val] != -1)
                                dd->ipath_lastegrheads[val] = -1;
                        if (dd->ipath_lastrcvhdrqtails[val] != -1)
                                dd->ipath_lastrcvhdrqtails[val] = -1;
                }
        }

        if (dd->ipath_nosma_bufs) {
                dd->ipath_nosma_secs += 5;
                if (dd->ipath_nosma_secs >= 30) {
                        ipath_cdbg(SMA, "No SMA bufs avail %u seconds; "
                                   "cancelling pending sends\n",
                                   dd->ipath_nosma_secs);
                        /*
                         * Issue an abort as well, in case we have a packet
                         * stuck in the launch FIFO.  This could corrupt an
                         * outgoing user packet in the worst case, but that
                         * situation is pretty catastrophic anyway.
                         */
                        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                         INFINIPATH_S_ABORT);
                        ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
                                             dd->ipath_piobcnt2k +
                                             dd->ipath_piobcnt4k -
                                             dd->ipath_lastport_piobuf);
                        /* start again, if necessary */
                        dd->ipath_nosma_secs = 0;
                } else
                        ipath_cdbg(SMA, "No SMA bufs avail %u tries, "
                                   "after %u seconds\n",
                                   dd->ipath_nosma_bufs,
                                   dd->ipath_nosma_secs);
        }

done:
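        /*
         * Re-arm the timer; we get here even on the early exit above, so
         * that polling resumes once diags complete.
         */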
        mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
}