2 * Copyright 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * Compatibility file for Linux wireless for kernels 2.6.28.
11 #include <linux/compat.h>
13 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28))
15 #include <linux/usb.h>
17 /* 2.6.28 compat code goes here */
19 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23))
20 #if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
22 * Compat-wireless notes for USB backport stuff:
24 * urb->reject exists on 2.6.27, the poison/unpoison helpers
25 * did not though. The anchor poison does not exist so we cannot use them.
27 * USB anchor poisoning seems to exist to prevent future driver submissions
28 * of usb_anchor_urb() to an anchor marked as poisoned. For older kernels
29 * we cannot use that, so new usb_anchor_urb()s will be anchored. The down
30 * side to this should be submission of URBs will continue being anchored
31 * on an anchor instead of having them being rejected immediately when the
32 * driver realized we needed to stop. For ar9170 we poison URBs upon the
33 * ar9170 mac80211 stop callback(), don't think this should be so bad.
34 * It means there is a period of time in older kernels for which we continue
35 * to anchor new URBs to a known stopped anchor. We have two anchors
41 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
42 * @urb: pointer to URB describing a previously submitted request,
45 * This routine cancels an in-progress request. It is guaranteed that
46 * upon return all completion handlers will have finished and the URB
47 * will be totally idle and cannot be reused. These features make
48 * this an ideal way to stop I/O in a disconnect() callback.
49 * If the request has not already finished or been unlinked
50 * the completion handler will see urb->status == -ENOENT.
52 * After and while the routine runs, attempts to resubmit the URB will fail
53 * with error -EPERM. Thus even if the URB's completion handler always
54 * tries to resubmit, it will not succeed and the URB will become idle.
56 * This routine may not be used in an interrupt context (such as a bottom
57 * half or a completion handler), or when holding a spinlock, or in other
58 * situations where the caller can't schedule().
60 * This routine should not be called by a driver after its disconnect
61 * method has returned.
63 void usb_poison_urb(struct urb *urb)
66 if (!(urb && urb->dev && urb->ep))
68 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
69 spin_lock_irq(&usb_reject_lock);
72 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
73 spin_unlock_irq(&usb_reject_lock);
76 * XXX: usb_hcd_unlink_urb() needs backporting... this is defined
77 * on usb hcd.c but urb.c gets access to it. That is, older kernels
78 * have usb_hcd_unlink_urb() but its not exported, nor can we
79 * re-implement it exactly. This essentially dequeues the urb from
80 * hw, we need to figure out a way to backport this.
82 //usb_hcd_unlink_urb(urb, -ENOENT);
84 wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
86 EXPORT_SYMBOL_GPL(usb_poison_urb);
88 #endif /* CONFIG_USB */
90 #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
92 #include <pcmcia/ds.h>
93 struct pcmcia_cfg_mem {
97 cistpl_cftable_entry_t dflt;
100 * pcmcia_loop_config() - loop over configuration options
101 * @p_dev: the struct pcmcia_device which we need to loop for.
102 * @conf_check: function to call for each configuration option.
103 * It gets passed the struct pcmcia_device, the CIS data
104 * describing the configuration option, and private data
105 * being passed to pcmcia_loop_config()
106 * @priv_data: private data to be passed to the conf_check function.
108 * pcmcia_loop_config() loops over all configuration options, and calls
109 * the driver-specific conf_check() for each one, checking whether
110 * it is a valid one. Returns 0 on success or errorcode otherwise.
112 int pcmcia_loop_config(struct pcmcia_device *p_dev,
113 int (*conf_check) (struct pcmcia_device *p_dev,
114 cistpl_cftable_entry_t *cfg,
115 cistpl_cftable_entry_t *dflt,
120 struct pcmcia_cfg_mem *cfg_mem;
126 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
130 /* get the current Vcc setting */
131 vcc = p_dev->socket->socket.Vcc;
133 tuple = &cfg_mem->tuple;
134 tuple->TupleData = cfg_mem->buf;
135 tuple->TupleDataMax = 255;
136 tuple->TupleOffset = 0;
137 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
138 tuple->Attributes = 0;
140 ret = pcmcia_get_first_tuple(p_dev, tuple);
142 cistpl_cftable_entry_t *cfg = &cfg_mem->parse.cftable_entry;
144 if (pcmcia_get_tuple_data(p_dev, tuple))
147 if (pcmcia_parse_tuple(tuple, &cfg_mem->parse))
151 p_dev->conf.ConfigIndex = cfg->index;
152 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
153 cfg_mem->dflt = *cfg;
155 ret = conf_check(p_dev, cfg, &cfg_mem->dflt, vcc, priv_data);
160 ret = pcmcia_get_next_tuple(p_dev, tuple);
165 EXPORT_SYMBOL(pcmcia_loop_config);
167 #endif /* CONFIG_PCMCIA */
169 #if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
171 void usb_unpoison_urb(struct urb *urb)
173 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
180 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
181 spin_lock_irqsave(&usb_reject_lock, flags);
184 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
185 spin_unlock_irqrestore(&usb_reject_lock, flags);
188 EXPORT_SYMBOL_GPL(usb_unpoison_urb);
193 * usb_poison_anchored_urbs - cease all traffic from an anchor
194 * @anchor: anchor the requests are bound to
196 * this allows all outstanding URBs to be poisoned starting
197 * from the back of the queue. Newly added URBs will also be
200 * This routine should not be called by a driver after its disconnect
201 * method has returned.
203 void usb_poison_anchored_urbs(struct usb_anchor *anchor)
207 spin_lock_irq(&anchor->lock);
208 // anchor->poisoned = 1; /* XXX: Cannot backport */
209 while (!list_empty(&anchor->urb_list)) {
210 victim = list_entry(anchor->urb_list.prev, struct urb,
212 /* we must make sure the URB isn't freed before we kill it*/
214 spin_unlock_irq(&anchor->lock);
215 /* this will unanchor the URB */
216 usb_poison_urb(victim);
218 spin_lock_irq(&anchor->lock);
220 spin_unlock_irq(&anchor->lock);
222 EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
226 * usb_get_from_anchor - get an anchor's oldest urb
227 * @anchor: the anchor whose urb you want
229 * this will take the oldest urb from an anchor,
230 * unanchor and return it
232 struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
237 spin_lock_irqsave(&anchor->lock, flags);
238 if (!list_empty(&anchor->urb_list)) {
239 victim = list_entry(anchor->urb_list.next, struct urb,
242 spin_unlock_irqrestore(&anchor->lock, flags);
243 usb_unanchor_urb(victim);
245 spin_unlock_irqrestore(&anchor->lock, flags);
252 EXPORT_SYMBOL_GPL(usb_get_from_anchor);
255 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
256 * @anchor: the anchor whose urbs you want to unanchor
258 * use this to get rid of all an anchor's urbs
260 void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
265 spin_lock_irqsave(&anchor->lock, flags);
266 while (!list_empty(&anchor->urb_list)) {
267 victim = list_entry(anchor->urb_list.prev, struct urb,
270 spin_unlock_irqrestore(&anchor->lock, flags);
271 /* this may free the URB */
272 usb_unanchor_urb(victim);
274 spin_lock_irqsave(&anchor->lock, flags);
276 spin_unlock_irqrestore(&anchor->lock, flags);
279 EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
282 * usb_anchor_empty - is an anchor empty
283 * @anchor: the anchor you want to query
285 * returns 1 if the anchor has no urbs associated with it
287 int usb_anchor_empty(struct usb_anchor *anchor)
289 return list_empty(&anchor->urb_list);
292 EXPORT_SYMBOL_GPL(usb_anchor_empty);
293 #endif /* CONFIG_USB */
296 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
299 * Make sure the BAR is actually a memory resource, not an IO resource
301 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
305 return ioremap_nocache(pci_resource_start(pdev, bar),
306 pci_resource_len(pdev, bar));
308 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
310 static unsigned long round_jiffies_common(unsigned long j, int cpu,
314 unsigned long original = j;
317 * We don't want all cpus firing their timers at once hitting the
318 * same lock or cachelines, so we skew each extra cpu with an extra
319 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
321 * The skew is done by adding 3*cpunr, then round, then subtract this
322 * extra offset again.
329 * If the target jiffie is just after a whole second (which can happen
330 * due to delays of the timer irq, long irq off times etc etc) then
331 * we should round down to the whole second, not up. Use 1/4th second
332 * as cutoff for this rounding as an extreme upper bound for this.
333 * But never round down if @force_up is set.
335 if (rem < HZ/4 && !force_up) /* round down */
340 /* now that we have rounded, subtract the extra skew again */
343 if (j <= jiffies) /* rounding ate our timeout entirely; */
349 * round_jiffies_up - function to round jiffies up to a full second
350 * @j: the time in (absolute) jiffies that should be rounded
352 * This is the same as round_jiffies() except that it will never
353 * round down. This is useful for timeouts for which the exact time
354 * of firing does not matter too much, as long as they don't fire too
357 unsigned long round_jiffies_up(unsigned long j)
359 return round_jiffies_common(j, raw_smp_processor_id(), true);
361 EXPORT_SYMBOL_GPL(round_jiffies_up);
363 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
366 skb_fill_page_desc(skb, i, page, off, size);
368 skb->data_len += size;
369 skb->truesize += size;
371 EXPORT_SYMBOL(skb_add_rx_frag);
373 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) */