/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
 */
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
EXPORT_SYMBOL(tioca_gart_found);	/* used by agp-sgi */

LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list);	/* used by agp-sgi */

static int tioca_gart_init(struct tioca_kernel *);
/**
 * tioca_gart_init - Initialize SGI TIOCA GART
 * @tioca_kern: ptr to the kernel struct identifying the CA to set up
 *
 * If the indicated tioca has devices present, initialize its associated
 * GART MMRs and kernel memory.
 */
tioca_gart_init(struct tioca_kernel *tioca_kern)
        struct tioca_common *tioca_common;
        struct tioca __iomem *ca_base;

        tioca_common = tioca_kern->ca_common;
        ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

        if (list_empty(tioca_kern->ca_devices))
        /*
         * Validate aperture size
         */

        switch (CA_APERATURE_SIZE >> 20) {
        case 4:    ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); break; /* 4MB */
        case 8:    ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); break; /* 8MB */
        case 16:   ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); break; /* 16MB */
        case 32:   ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); break; /* 32 MB */
        case 64:   ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); break; /* 64 MB */
        case 128:  ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); break; /* 128 MB */
        case 256:  ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); break; /* 256 MB */
        case 512:  ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); break; /* 512 MB */
        case 1024: ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); break; /* 1GB */
        case 2048: ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); break; /* 2GB */
        case 4096: ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); break; /* 4 GB */
        default:
                printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
                       "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
                return -1;
        }
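        /*
         * Note: the AP_SIZE field programmed above is simply the
         * ones-complement of (aperture size in 4MB units - 1), truncated
         * to 10 bits, so each doubling of the aperture clears one more
         * low bit of the field.
         */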
        /*
         * Set up other aperture parameters
         */

        if (PAGE_SIZE >= 16384) {
                tioca_kern->ca_ap_pagesize = 16384;
                ap_reg |= CA_GART_PAGE_SIZE;
        } else {
                tioca_kern->ca_ap_pagesize = 4096;
        }
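        /*
         * The CA GART supports 4K and 16K aperture pages (CA_GART_PAGE_SIZE
         * selects 16K); using the larger size whenever the kernel page size
         * permits means fewer GART entries are needed per mapping.
         */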
        tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
        tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
        tioca_kern->ca_gart_entries =
            tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;

        ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
        ap_reg |= tioca_kern->ca_ap_bus_base;
        /*
         * Allocate and set up the GART
         */

        tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
        tmp =
            alloc_pages_node(tioca_kern->ca_closest_node,
                             GFP_KERNEL | __GFP_ZERO,
                             get_order(tioca_kern->ca_gart_size));
        if (!tmp) {
                printk(KERN_ERR "%s: Could not allocate "
                       "%llu bytes (order %d) for GART\n",
                       __func__,
                       tioca_kern->ca_gart_size,
                       get_order(tioca_kern->ca_gart_size));
                return -ENOMEM;
        }
        tioca_kern->ca_gart = page_address(tmp);
        tioca_kern->ca_gart_coretalk_addr =
            PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
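        /*
         * PHYS_TO_TIODMA converts the kernel physical address of the GART
         * table into the coretalk (TIO DMA) address the CA hardware uses
         * when it fetches GART entries.
         */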
        /*
         * Compute PCI/AGP convenience fields
         */

        offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
        tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
        tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
        tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
        tioca_kern->ca_pcigart_base =
            tioca_kern->ca_gart_coretalk_addr + offset;
        tioca_kern->ca_pcigart =
            &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
        tioca_kern->ca_pcigart_entries =
            tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
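        /*
         * One bit per PCI GART entry tracks which slots are in use, so the
         * allocation bitmap below needs ca_pcigart_entries / 8 bytes.
         */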
        tioca_kern->ca_pcigart_pagemap =
            kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
        if (!tioca_kern->ca_pcigart_pagemap) {
                free_pages((unsigned long)tioca_kern->ca_gart,
                           get_order(tioca_kern->ca_gart_size));
                return -ENOMEM;
        }
        offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
        tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
        tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
        tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
        tioca_kern->ca_gfxgart_base =
            tioca_kern->ca_gart_coretalk_addr + offset;
        tioca_kern->ca_gfxgart =
            &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
        tioca_kern->ca_gfxgart_entries =
            tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
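        /*
         * The single CA aperture is thus carved into two windows: a PCI32
         * mapped region and an AGP (gfx) mapped region, each backed by its
         * own contiguous slice of the one GART table allocated above.
         */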
        /*
         * Various control settings:
         * - use AGP op-combining
         * - use GET semantics to fetch memory
         * - participate in coherency domain
         * - DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
         */

        __sn_setq_relaxed(&ca_base->ca_control1,
                          CA_AGPDMA_OP_ENB_COMBDELAY);	/* PV895469 ? */
        __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
        __sn_setq_relaxed(&ca_base->ca_control2,
                          (0x2ull << CA_GART_MEM_PARAM_SHFT));
        tioca_kern->ca_gart_iscoherent = 1;
        __sn_clrq_relaxed(&ca_base->ca_control2,
                          (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
        /*
         * Unmask GART fetch error interrupts.  Clear residual errors first.
         */

        writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
        writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
        __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
        /*
         * Program the aperture and GART registers in the TIOCA
         */

        writeq(ap_reg, &ca_base->ca_gart_aperature);
        writeq(tioca_kern->ca_gart_coretalk_addr | 1, &ca_base->ca_gart_ptr_table);
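        /*
         * The low bit OR'd into ca_gart_ptr_table presumably serves as the
         * table-valid/enable flag; the rest of the value is the coretalk
         * address of the GART allocated above.
         */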
/**
 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
 * @tioca_kern: structure representing the CA
 *
 * Given a CA, scan all attached functions making sure they all support
 * FastWrite.  If so, enable FastWrite for all functions and the CA itself.
 */
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
        struct tioca __iomem *tioca_base;
        struct pci_dev *pdev;
        struct tioca_common *common;

        common = tioca_kern->ca_common;
        /*
         * Scan all VGA controllers on this bus making sure they all
         * support FW.  If not, return.
         */

        list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
                if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
                        continue;

                cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
                if (!cap_ptr)
                        return;	/* no AGP CAP means no FW */

                pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
                if (!(reg & PCI_AGP_STATUS_FW))
                        return;	/* function doesn't support FW */
        }
        /*
         * Set FW for all VGA functions
         */

        list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
                if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
                        continue;

                cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
                pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
                reg |= PCI_AGP_COMMAND_FW;
                pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
        }
        /*
         * Set the CA's FW setting to match
         */

        tioca_base = (struct tioca __iomem *)common->ca_common.bs_base;
        __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);

EXPORT_SYMBOL(tioca_fastwrite_enable);	/* used by agp-sgi */
/**
 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
 * @paddr: system physical address
 *
 * Map @paddr into 64-bit CA bus space.  No device context is necessary.
 * Bits 53:0 come from the coretalk address.  We just need to mask in the
 * following optional bits of the 64-bit PCI address:
 *
 * 63:60 - Coretalk Packet Type -  0x1 for Mem Get/Put (coherent)
 *                                 0x2 for PIO (non-coherent)
 *                                 We will always use 0x1
 * 55:55 - Swap bytes		   Currently unused
 */
tioca_dma_d64(unsigned long paddr)
        bus_addr = PHYS_TO_TIODMA(paddr);

        BUG_ON(bus_addr >> 54);

        /* Set upper nibble to Cache Coherent Memory op */
        bus_addr |= (1UL << 60);
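        /*
         * bus_addr now carries the coretalk address in bits 53:0 with the
         * coherent Mem Get/Put packet type (0x1) in bits 63:60, matching
         * the layout described in the comment block above.
         */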
/**
 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
 * @pdev: linux pci_dev representing the function
 * @paddr: system physical address
 *
 * Map @paddr into the 64-bit bus space of the CA associated with @pdev.
 *
 * The CA AGP 48-bit direct address falls out as follows:
 *
 * When direct mapping AGP addresses, the 48-bit AGP address is
 * constructed as follows:
 *
 * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
 *           address [47:40].  The upper 8 node bits are fixed
 *           and come from the xxx register bits [5:0]
 * [39:38] - Chiplet ID extracted from coretalk address [39:38]
 * [37:00] - node offset extracted from coretalk address [37:00]
 *
 * Since the node id in general will be non-zero, and the chiplet id
 * will always be non-zero, it follows that the device must support
 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
 * and in general should be 0xffffffffffff (48 bits) to target nodes
 * up to 255.  Nodes above 255 need the support of the xxx register,
 * and so a given CA can only directly target nodes in the range
 * selected by that register.
 */
tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
        struct tioca_common *tioca_common;
        struct tioca __iomem *ca_base;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

        tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
        ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

        ct_addr = PHYS_TO_TIODMA(paddr);

        bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
        node_upper = ct_addr >> 48;
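        /*
         * The low 48 bits of the coretalk address go out on the bus
         * directly; the node bits above 48 must match the CA's
         * ca_agp_dma_addr_extn register, which is checked below.
         */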
        if (node_upper > 64) {
                printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
                       "of range\n", __func__, (void *)ct_addr);
                return 0;
        }

        agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
        if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
                printk(KERN_ERR "%s: coretalk upper node (%u) "
                       "mismatch with ca_agp_dma_addr_extn (%llu)\n",
                       __func__,
                       node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
                return 0;
        }
/**
 * tioca_dma_mapped - create a DMA mapping using a CA GART
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @req_size: len (bytes) to map
 *
 * Map @paddr into CA address space using the GART mechanism.  The mapped
 * dma_addr_t is guaranteed to be contiguous in CA bus space.
 */
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
        int ps, ps_shift, entry, entries, mapsize;
        u64 xio_addr, end_xio_addr;
        struct tioca_common *tioca_common;
        struct tioca_kernel *tioca_kern;
        dma_addr_t bus_addr = 0;
        struct tioca_dmamap *ca_dmamap;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

        tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
        tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

        xio_addr = PHYS_TO_TIODMA(paddr);
        spin_lock_irqsave(&tioca_kern->ca_lock, flags);

        /*
         * Allocate a map struct
         */

        ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);

        /*
         * Locate free entries that can hold req_size.  Account for
         * unaligned start/length when allocating.
         */

        ps = tioca_kern->ca_ap_pagesize;	/* will be power of 2 */
        ps_shift = ffs(ps) - 1;
        end_xio_addr = xio_addr + req_size - 1;

        entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;

        map = tioca_kern->ca_pcigart_pagemap;
        mapsize = tioca_kern->ca_pcigart_entries;
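        /*
         * Find a run of 'entries' consecutive free slots in the PCI GART
         * allocation bitmap; the mapping must be contiguous in bus space.
         */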
        entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
        if (entry >= mapsize) {

        bitmap_set(map, entry, entries);

        bus_addr = tioca_kern->ca_pciap_base + (entry * ps);

        ca_dmamap->cad_dma_addr = bus_addr;
        ca_dmamap->cad_gart_size = entries;
        ca_dmamap->cad_gart_entry = entry;
        list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
        tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
        bus_addr += xio_addr & (ps - 1);
        xio_addr &= ~(ps - 1);
        xio_addr += ps;
        entry++;

        while (xio_addr < end_xio_addr) {
                tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
                xio_addr += ps;
                entry++;
        }
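        /*
         * Every CA page spanned by [paddr, paddr + req_size) now has a GART
         * entry; flush the CA's TLB so the hardware picks up the new
         * translations before the bus address is handed back.
         */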
        tioca_tlbflush(tioca_kern);

        spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
/**
 * tioca_dma_unmap - release CA mapping resources
 * @pdev: linux pci_dev representing the function
 * @bus_addr: bus address returned by an earlier tioca_dma_map
 * @dir: mapping direction (unused)
 *
 * Locate mapping resources associated with @bus_addr and release them.
 * For mappings created using the direct modes (64 or 48) there are no
 * resources to release.
 */
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
        struct tioca_common *tioca_common;
        struct tioca_kernel *tioca_kern;
        struct tioca_dmamap *map;
        struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

        tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
        tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

        /* Return straight away if this isn't a GART-mapped address */

        if (bus_addr < tioca_kern->ca_pciap_base ||
            bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
                return;
        spin_lock_irqsave(&tioca_kern->ca_lock, flags);

        list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
            if (map->cad_dma_addr == bus_addr)
                break;

        entry = map->cad_gart_entry;

        for (i = 0; i < map->cad_gart_size; i++, entry++) {
                clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
                tioca_kern->ca_pcigart[entry] = 0;
        }

        tioca_tlbflush(tioca_kern);

        list_del(&map->cad_list);
        spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
/**
 * tioca_dma_map - map pages for PCI DMA
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @byte_count: bytes to map
 *
 * This is the main wrapper for mapping host physical pages to CA PCI space.
 * The mapping mode used is based on the device's dma_mask.  As a last
 * resort the GART mapped mode is used.
 */
tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)

        /*
         * Not supported for now ...
         */

        if (dma_flags & SN_DMA_MSI)
                return 0;

        /*
         * If the card is 64- or 48-bit addressable, use a direct mapping.
         * 32-bit direct is so restrictive w.r.t. where the memory resides
         * that we don't use it even though CA has some support.
         */

        if (pdev->dma_mask == ~0UL)
                mapaddr = tioca_dma_d64(paddr);
        else if (pdev->dma_mask == 0xffffffffffffUL)
                mapaddr = tioca_dma_d48(pdev, paddr);

        /* Last resort ... use PCI portion of CA GART */

        if (!mapaddr)
                mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
/**
 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
 * @irq: irq number
 * @arg: pointer to tioca_common struct for the given CA
 *
 * Handle a CA error interrupt.  Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
tioca_error_intr_handler(int irq, void *arg)
        struct tioca_common *soft = arg;
        struct ia64_sal_retval ret_stuff;

        ret_stuff.status = 0;

        segment = soft->ca_common.bs_persist_segment;
        busnum = soft->ca_common.bs_persist_busnum;

        SAL_CALL_NOLOCK(ret_stuff,
                        (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
                        segment, busnum, 0, 0, 0, 0, 0);
/**
 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
 * @prom_bussoft: Common prom/kernel struct representing the bus
 *
 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
 * space.  Allocates and initializes a kernel-only area for a given CA,
 * and sets up an irq for handling CA error interrupts.
 *
 * On successful setup, returns the kernel version of tioca_common back to
 * the caller.
 */
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
        struct tioca_common *tioca_common;
        struct tioca_kernel *tioca_kern;

        /* sanity check prom rev */

        if (is_shub1() && sn_sal_rev() < 0x0406) {
                printk(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
                       "for tioca support\n", __func__);
                return NULL;
        }
        /*
         * Allocate kernel bus soft and copy from prom.
         */

        tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
        if (!tioca_common)
                return NULL;

        memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
        tioca_common->ca_common.bs_base = (unsigned long)
            ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
                    sizeof(struct tioca_common));
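        /*
         * The prom hands us a physical bs_base; it is replaced here with an
         * ioremap()ed kernel virtual address, which later code casts to
         * struct tioca __iomem * to reach the CA MMRs.
         */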
        /* init kernel-private area */

        tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);

        tioca_kern->ca_common = tioca_common;
        spin_lock_init(&tioca_kern->ca_lock);
        INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
        tioca_kern->ca_closest_node =
            nasid_to_cnodeid(tioca_common->ca_closest_nasid);
        tioca_common->ca_kernel_private = (u64) tioca_kern;

        bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
                           tioca_common->ca_common.bs_persist_busnum);

        tioca_kern->ca_devices = &bus->devices;

        if (tioca_gart_init(tioca_kern) < 0) {

        list_add(&tioca_kern->ca_list, &tioca_list);
        if (request_irq(SGI_TIOCA_ERROR,
                        tioca_error_intr_handler,
                        IRQF_SHARED, "TIOCA error", (void *)tioca_common))
                printk(KERN_WARNING
                       "%s: Unable to get irq %d. "
                       "Error interrupts won't be routed for TIOCA bus %d\n",
                       __func__, SGI_TIOCA_ERROR,
                       (int)tioca_common->ca_common.bs_persist_busnum);

        sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
        /* Setup locality information */
        controller->node = tioca_kern->ca_closest_node;
static struct sn_pcibus_provider tioca_pci_interfaces = {
        .dma_map = tioca_dma_map,
        .dma_map_consistent = tioca_dma_map,
        .dma_unmap = tioca_dma_unmap,
        .bus_fixup = tioca_bus_fixup,
        .force_interrupt = NULL,
        .target_interrupt = NULL
};
/**
 * tioca_init_provider - init SN PCI provider ops for TIO CA
 */
tioca_init_provider(void)
        sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;