2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/kernel.h>
18 #include <linux/string.h>
/*
 * True when running on a BCM47162 rev-0 chip with the current core set to
 * the MIPS 74K.  On that combination the MIPS DMP wrapper registers must
 * not be accessed -- see the SI_ERROR guards in ai_flag(), ai_core_cflags*()
 * and ai_core_sflags() below.
 * NOTE(review): relies on `sih` and `sii` being in scope at every expansion
 * site; only safe to use inside this file's functions.
 */
29 #define BCM47162_DMP() ((CHIPID(sih->chip) == BCM47162_CHIP_ID) && \
30 (CHIPREV(sih->chiprev) == 0) && \
31 (sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
/*
 * get_erom_ent() - return the next enumeration-ROM entry satisfying
 * (ent & mask) == match, advancing *eromptr as entries are consumed.
 * Callers pass mask == 0 / match == 0 to take the next word unconditionally
 * (see get_asd()).  Scanning stops early at the (ER_END | ER_VALID) marker.
 * NOTE(review): this listing elides several lines (gaps in the embedded line
 * numbers), including the return type, the loop construct and the return
 * statements; comments describe only what is visible.
 */
36 get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
/* counts of skipped entries, reported in the verbose message below */
39 uint inv = 0, nom = 0;
/* fetch the current 32-bit EROM word */
42 ent = R_REG(si_osh(sih), *eromptr);
/* entry without ER_VALID set: not a usable descriptor */
48 if ((ent & ER_VALID) == 0) {
/* end-of-table marker terminates the scan */
53 if (ent == (ER_END | ER_VALID))
/* caller's filter: accept the entry when the masked bits match */
56 if ((ent & mask) == match)
62 SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
64 SI_VMSG((" after %d invalid and %d non-matching entries\n",
/*
 * get_asd() - parse one Address Space Descriptor from the EROM.
 * Expects the next valid entry to be an ER_ADD-tagged descriptor for slave
 * port `sp` with space type `st` (AD_ST_SLAVE / AD_ST_BRIDGE / wrap types);
 * fills *addrl/*addrh with the base address and *sizel/*sizeh with the size.
 * A size field of AD_SZ_SZD means an explicit size descriptor word follows;
 * otherwise the size is AD_SZ_BASE << (sz >> AD_SZ_SHIFT).
 * On a non-matching entry the word is "pushed back" (entry rejected).
 * NOTE(review): the return type/value, the push-back statement and the
 * 64-bit-address conditionals are elided from this listing -- presumably it
 * returns the descriptor word (0 on mismatch); verify against the full file.
 */
71 get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
72 u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
/* take the next valid EROM word as the candidate descriptor */
76 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
77 if (((asd & ER_TAG1) != ER_ADD) ||
78 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
79 ((asd & AD_ST_MASK) != st)) {
80 /* This is not what we want, "push" it back */
84 *addrl = asd & AD_ADDR_MASK;
/* high 32 bits of the address come from the following word */
86 *addrh = get_erom_ent(sih, eromptr, 0, 0);
90 sz = asd & AD_SZ_MASK;
91 if (sz == AD_SZ_SZD) {
/* explicit size descriptor word follows the address */
92 szd = get_erom_ent(sih, eromptr, 0, 0);
93 *sizel = szd & SD_SZ_MASK;
95 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
/* otherwise size is encoded in the descriptor itself */
97 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
99 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
100 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
/*
 * ai_hwfixup() - hook for chip-specific hardware workarounds applied after
 * the EROM scan.  Body elided from this listing -- cannot document what (if
 * anything) it does here; confirm against the full file.
 */
105 static void ai_hwfixup(si_info_t *sii)
/*
 * ai_scan() - walk the AI enumeration ROM and record every core found:
 * core id, revision, base address(es) and wrapper address(es) are stored
 * into the si_info_t arrays (coreid[], coresba[], coresba_size[],
 * coresba2[], wrapba[], ...).  How the EROM is reached depends on the bus
 * type: direct mapping on SI_BUS, via the PCI BAR0 window on PCI.
 * NOTE(review): many lines (case labels, loop bodies, error-path gotos,
 * numcores accounting) are elided from this listing; comments below mark
 * only what the visible code establishes.
 */
109 /* parse the enumeration rom to identify all cores */
110 void ai_scan(si_t *sih, void *regs, uint devid)
112 si_info_t *sii = SI_INFO(sih);
113 chipcregs_t *cc = (chipcregs_t *) regs;
114 u32 erombase, *eromptr, *eromlim;
/* chipcommon tells us where the EROM lives */
116 erombase = R_REG(sii->osh, &cc->eromptr);
118 switch (BUSTYPE(sih->bustype)) {
/* SI_BUS (presumably): map the EROM directly */
120 eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
124 /* Set wrappers address */
125 sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);
127 /* Now point the window at the erom */
128 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
136 eromptr = (u32 *)(unsigned long)erombase;
/* NOTE(review): "enumertion" typo in the log string below -- runtime
 * string, deliberately left unmodified in this comment-only pass. */
140 SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
/* the EROM table ends before its remap-control register */
145 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
147 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
148 while (eromptr < eromlim) {
149 u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
150 u32 mpd, asd, addrl, addrh, sizel, sizeh;
157 /* Grok a component */
158 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
159 if (cia == (ER_END | ER_VALID)) {
160 SI_VMSG(("Found END of erom after %d cores\n",
/* a component is described by two consecutive CI words: CIA then CIB */
166 cib = get_erom_ent(sih, &eromptr, 0, 0);
168 if ((cib & ER_TAG) != ER_CI) {
169 SI_ERROR(("CIA not followed by CIB\n"));
/* unpack component id, manufacturer, revision and port counts */
173 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
174 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
175 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
176 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
177 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
178 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
179 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
181 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));
/* skip ARM default components and components with no slave ports */
183 if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
185 if ((nmw + nsw == 0)) {
186 /* A component which is not a core */
187 if (cid == OOB_ROUTER_CORE_ID) {
188 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
189 &addrl, &addrh, &sizel, &sizeh);
/* remember where the out-of-band router lives */
191 sii->oob_router = addrl;
198 /* sii->eromptr[idx] = base; */
201 sii->coreid[idx] = cid;
/* consume (and merely log) the master-port descriptors */
203 for (i = 0; i < nmp; i++) {
204 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
205 if ((mpd & ER_TAG) != ER_MP) {
206 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
209 SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
210 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
211 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
214 /* First Slave Address Descriptor should be port 0:
215 * the main register space for the core
218 get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
221 /* Try again to see if it is a bridge */
223 get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
224 &addrh, &sizel, &sizeh);
/* the main register space is expected to be exactly SI_CORE_SIZE (4KB),
 * with no high address/size bits set */
227 else if ((addrh != 0) || (sizeh != 0)
228 || (sizel != SI_CORE_SIZE)) {
229 SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
233 sii->coresba[idx] = addrl;
234 sii->coresba_size[idx] = sizel;
235 /* Get any more ASDs in port 0 */
239 get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
240 &addrh, &sizel, &sizeh);
/* the second 4KB slave space (if any) is recorded as coresba2 */
241 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
242 sii->coresba2[idx] = addrl;
243 sii->coresba2_size[idx] = sizel;
248 /* Go through the ASDs for other slave ports */
249 for (i = 1; i < nsp; i++) {
253 get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
254 &addrl, &addrh, &sizel, &sizeh);
257 SI_ERROR((" SP %d has no address descriptors\n",
263 /* Now get master wrappers */
264 for (i = 0; i < nmw; i++) {
266 get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
267 &addrh, &sizel, &sizeh);
269 SI_ERROR(("Missing descriptor for MW %d\n", i));
272 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
273 SI_ERROR(("Master wrapper %d is not 4KB\n", i));
/* first master wrapper address is what ai_setcoreidx() maps */
277 sii->wrapba[idx] = addrl;
280 /* And finally slave wrappers */
281 for (i = 0; i < nsw; i++) {
/* when the core has more than one slave port, slave wrappers start
 * at port 1 (port 0 is the core's register space) */
282 uint fwp = (nsp == 1) ? 0 : 1;
284 get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
285 &addrl, &addrh, &sizel, &sizeh);
287 SI_ERROR(("Missing descriptor for SW %d\n", i));
290 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
291 SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
/* only fall back to a slave wrapper when there was no master wrapper */
294 if ((nmw == 0) && (i == 0))
295 sii->wrapba[idx] = addrl;
298 /* Don't record bridges */
306 SI_ERROR(("Reached end of erom without finding END"));
/*
 * ai_setcoreidx() - switch the register "focus" to core `coreidx`.
 * Maps (and caches) the core's register space and wrapper space as needed
 * for SI_BUS, or repoints the PCI BAR0 windows for PCI, then records the
 * new current index in sii->curidx.
 * NOTE(review): the return statements, case labels and several closing
 * braces are elided; per the header comment it returns the core's virtual
 * register address (sii->curmap) -- confirm against the full file.
 */
313 /* This function changes the logical "focus" to the indicated core.
314 * Return the current core's virtual address.
316 void *ai_setcoreidx(si_t *sih, uint coreidx)
318 si_info_t *sii = SI_INFO(sih);
319 u32 addr = sii->coresba[coreidx];
320 u32 wrap = sii->wrapba[coreidx];
/* reject an out-of-range core index (early-return body elided) */
323 if (coreidx >= sii->numcores)
327 * If the user has provided an interrupt mask enabled function,
328 * then assert interrupts are disabled before switching the core.
330 ASSERT((sii->intrsenabled_fn == NULL)
331 || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));
333 switch (BUSTYPE(sih->bustype)) {
/* SI_BUS (presumably): lazily map and cache both regions */
336 if (!sii->regs[coreidx]) {
337 sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
338 ASSERT(GOODREGS(sii->regs[coreidx]));
340 sii->curmap = regs = sii->regs[coreidx];
341 if (!sii->wrappers[coreidx]) {
342 sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
343 ASSERT(GOODREGS(sii->wrappers[coreidx]));
345 sii->curwrap = sii->wrappers[coreidx];
349 /* point bar0 window */
350 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
352 /* point bar0 2nd 4KB window */
353 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
/* direct-address bus: no mapping needed, addresses used as-is */
360 sii->curmap = regs = (void *)(unsigned long)addr;
361 sii->curwrap = (void *)(unsigned long)wrap;
371 sii->curidx = coreidx;
/* Body elided in this listing -- presumably returns a constant/derived
 * count (2, given coresba/coresba2 below); confirm against the full file. */
376 /* Return the number of address spaces in current core */
377 int ai_numaddrspaces(si_t *sih)
/*
 * Looks up the recorded base address for the current core (cidx,
 * presumably sii->curidx -- its assignment is elided): asidx 0 maps to
 * coresba[], asidx 1 to coresba2[]; anything else is unsupported here.
 */
382 /* Return the address of the nth address space in the current core */
383 u32 ai_addrspace(si_t *sih, uint asidx)
392 return sii->coresba[cidx];
394 return sii->coresba2[cidx];
396 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
/*
 * Size counterpart of ai_addrspace(): asidx 0 -> coresba_size[],
 * asidx 1 -> coresba2_size[]; other indices are unsupported here.
 * (Selection conditionals and the error-path return are elided.)
 */
401 /* Return the size of the nth address space in the current core */
402 u32 ai_addrspacesize(si_t *sih, uint asidx)
411 return sii->coresba_size[cidx];
413 return sii->coresba2_size[cidx];
415 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
/*
 * ai_flag() - return the current core's OOB select value (low 5 bits of
 * the wrapper's oobselouta30 register).  Guarded against the BCM47162
 * rev-0 MIPS DMP access erratum (see BCM47162_DMP() above); the guarded
 * early-return value is elided from this listing.
 */
420 uint ai_flag(si_t *sih)
426 if (BCM47162_DMP()) {
427 SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
432 return R_REG(sii->osh, &ai->oobselouta30) & 0x1f;
/* ai_setint() - body elided in this listing (likely a no-op stub on AI
 * backplanes; confirm against the full file). */
435 void ai_setint(si_t *sih, int siflag)
/*
 * ai_write_wrap_reg() - write `val` to the current core's AI wrapper
 * register at byte offset `offset` (offset / 4 converts to a u32 index).
 * Assumes sii->curwrap was set by a prior ai_setcoreidx()/ai_scan().
 */
439 void ai_write_wrap_reg(si_t *sih, u32 offset, u32 val)
441 si_info_t *sii = SI_INFO(sih);
442 u32 *w = (u32 *) sii->curwrap;
443 W_REG(sii->osh, w + (offset / 4), val);
/*
 * ai_corevendor() - return the manufacturer id of the current core,
 * extracted from its cached CIA (component id A) EROM word.
 */
447 uint ai_corevendor(si_t *sih)
453 cia = sii->cia[sii->curidx];
454 return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
/*
 * ai_corerev() - return the revision of the current core, extracted from
 * its cached CIB (component id B) EROM word.
 */
457 uint ai_corerev(si_t *sih)
463 cib = sii->cib[sii->curidx];
464 return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
/*
 * ai_iscoreup() - true when the current core is clocked and out of reset:
 * ioctrl has SICF_CLOCK_EN set without SICF_FGC (forced gated clocks), and
 * resetctrl's AIRC_RESET bit is clear.
 */
467 bool ai_iscoreup(si_t *sih)
475 return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
477 && ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
/*
 * ai_corereg() - read-modify-write one 32-bit register of an arbitrary
 * core: new = (old & ~mask) | val; returns the register value afterwards.
 * Fast paths compute a direct pointer without switching cores (SI_BUS
 * always; PCI for chipcommon/pci core registers); the slow path disables
 * interrupts, switches to `coreidx`, and restores the original core.
 * NOTE(review): the opening of the header comment, `fast` flag handling,
 * the mask==0 read-only branch condition, and the final return are elided
 * from this listing.
 */
481 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
482 * switch back to the original core, and return the new value.
484 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
486 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
487 * and (on newer pci cores) chipcommon registers.
489 uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
500 ASSERT(GOODIDX(coreidx));
501 ASSERT(regoff < SI_CORE_SIZE);
/* val may only set bits inside mask */
502 ASSERT((val & ~mask) == 0);
503 if (coreidx >= SI_MAXCORES)
507 if (BUSTYPE(sih->bustype) == SI_BUS) {
508 /* If internal bus, we can always get at everything */
510 /* map if does not exist */
511 if (!sii->regs[coreidx]) {
512 sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
514 ASSERT(GOODREGS(sii->regs[coreidx]));
516 r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
517 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
518 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
520 if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
521 /* Chipc registers are mapped at 12KB */
524 r = (u32 *) ((char *)sii->curmap +
525 PCI_16KB0_CCREGS_OFFSET + regoff);
526 } else if (sii->pub.buscoreidx == coreidx) {
527 /* pci registers are at either in the last 2KB of an 8KB window
528 * or, in pcie and pci rev 13 at 8KB
532 r = (u32 *) ((char *)sii->curmap +
533 PCI_16KB0_PCIREGS_OFFSET +
536 r = (u32 *) ((char *)sii->curmap +
537 ((regoff >= SBCONFIGOFF) ?
538 PCI_BAR0_PCISBR_OFFSET :
539 PCI_BAR0_PCIREGS_OFFSET) +
/* slow path: no direct pointer -- block interrupts and switch cores */
545 INTR_OFF(sii, intr_val);
547 /* save current core index */
548 origidx = si_coreidx(&sii->pub);
551 r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx) +
/* the actual mask & set, then read back the result */
558 w = (R_REG(sii->osh, r) & ~mask) | val;
559 W_REG(sii->osh, r, w);
563 w = R_REG(sii->osh, r);
566 /* restore core index */
567 if (origidx != coreidx)
568 ai_setcoreidx(&sii->pub, origidx);
570 INTR_RESTORE(sii, intr_val);
/*
 * ai_core_disable() - put the current core into reset via its AI wrapper.
 * No-op if already in reset; otherwise writes `bits` to ioctrl (read back
 * to flush the posted write) and then asserts AIRC_RESET in resetctrl.
 * NOTE(review): a delay between the ioctrl write and the reset assert is
 * elided from this listing.
 */
576 void ai_core_disable(si_t *sih, u32 bits)
584 ASSERT(GOODREGS(sii->curwrap));
587 /* if core is already in reset, just return */
588 if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
591 W_REG(sii->osh, &ai->ioctrl, bits);
/* read back to make sure the write landed before asserting reset */
592 dummy = R_REG(sii->osh, &ai->ioctrl);
595 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
/*
 * ai_core_reset() - reset and re-enable the current core via its AI
 * wrapper: disable first (works from any core state), then bring it up
 * with forced-gated clocks (SICF_FGC) enabled, deassert reset, and finally
 * drop SICF_FGC leaving just the normal clock enable plus `bits`.
 * Each ioctrl write is read back to flush it.  (Inter-step delays are
 * elided from this listing.)
 */
599 /* reset and re-enable a core
601 * bits - core specific bits that are set during and after reset sequence
602 * resetbits - core specific bits that are set only during reset sequence
604 void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
611 ASSERT(GOODREGS(sii->curwrap));
615 * Must do the disable sequence first to work for arbitrary current core state.
617 ai_core_disable(sih, (bits | resetbits));
620 * Now do the initialization sequence.
622 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
623 dummy = R_REG(sii->osh, &ai->ioctrl);
/* release the core from reset while clocks are forced on */
624 W_REG(sii->osh, &ai->resetctrl, 0);
627 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
628 dummy = R_REG(sii->osh, &ai->ioctrl);
/*
 * ai_core_cflags_wo() - write-only update of the current core's ioctrl
 * ("core flags") register: new = (old & ~mask) | val.  Returns nothing.
 * Guarded against the BCM47162 rev-0 MIPS DMP erratum (early return body
 * elided).
 */
632 void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
640 if (BCM47162_DMP()) {
641 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
646 ASSERT(GOODREGS(sii->curwrap));
649 ASSERT((val & ~mask) == 0);
652 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
653 W_REG(sii->osh, &ai->ioctrl, w);
/*
 * ai_core_cflags() - mask & set the current core's ioctrl register and
 * return its resulting value.  Same BCM47162 rev-0 guard as
 * ai_core_cflags_wo() (guarded return value elided).  A mask==0/val==0
 * call presumably degrades to a pure read -- the conditional around the
 * write is elided; confirm against the full file.
 */
657 u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
664 if (BCM47162_DMP()) {
665 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
670 ASSERT(GOODREGS(sii->curwrap));
673 ASSERT((val & ~mask) == 0);
676 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
677 W_REG(sii->osh, &ai->ioctrl, w);
680 return R_REG(sii->osh, &ai->ioctrl);
/*
 * ai_core_sflags() - mask & set the current core's iostatus ("state
 * flags") register and return its resulting value.  The extra assert
 * restricts writes to the SISF_CORE_BITS field.  Guarded against the
 * BCM47162 rev-0 MIPS DMP erratum (guarded return value elided; the
 * closing brace also falls outside this listing).
 */
683 u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
690 if (BCM47162_DMP()) {
691 SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
695 ASSERT(GOODREGS(sii->curwrap));
698 ASSERT((val & ~mask) == 0);
699 ASSERT((mask & ~SISF_CORE_BITS) == 0);
702 w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
703 W_REG(sii->osh, &ai->iostatus, w);
706 return R_REG(sii->osh, &ai->iostatus);