VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
if (!irq)
return -ENOMEM;
- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
- return -EINVAL;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
+ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+ return -EINVAL;
return irq;
}
break;
}
if (filter[i].jt != 0) {
- if (filter[i].jf)
- t_offset += is_near(f_offset) ? 2 : 6;
+ if (filter[i].jf && f_offset)
+ t_offset += is_near(f_offset) ? 2 : 5;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
- depends on PATA_PLATFORM && OF
+ depends on PATA_PLATFORM && OF && OF_IRQ
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware bindings.
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
- if (bitmap->mddev->degraded)
- /* Never clear bits or update events_cleared when degraded */
- success = 0;
while (sectors) {
sector_t blocks;
return;
}
- if (success &&
+ if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
return -EINVAL;
rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
newconf = linear_conf(mddev,mddev->raid_disks+1);
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- } else
- break;
+ }
}
}
}
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else {
+ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
/* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
+ set_bit(R5_Insync, &dev->flags);
+ else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Expanded, &dev->flags))
+ /* If we've reshaped into here, we assume it is Insync.
+ * We will shortly update recovery_offset to make
+ * it official.
+ */
+ set_bit(R5_Insync, &dev->flags);
+
if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
{
struct isp_pipeline *pipe =
to_isp_pipeline(&ccdc->video_out.video.entity);
- struct video_device *vdev = &ccdc->subdev.devnode;
+ struct video_device *vdev = ccdc->subdev.devnode;
struct v4l2_event event;
memset(&event, 0, sizeof(event));
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
- struct video_device *vdev = &stat->subdev.devnode;
+ struct video_device *vdev = stat->subdev.devnode;
struct v4l2_event event;
struct omap3isp_stat_event_status *status = (void *)event.u.data;
* Debugfs support for the AB5500 MFD driver
*/
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mfd/ab5500/ab5500.h>
static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+#ifdef CONFIG_DEBUG_FS
static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
.flags = IORESOURCE_IRQ,
},
};
+#endif
static struct resource __devinitdata ab8500_usb_resources[] = {
{
ret = __adp5520_read(chip->client, reg, ®_val);
- if (!ret && ((reg_val & bit_mask) == 0)) {
+ if (!ret && ((reg_val & bit_mask) != bit_mask)) {
reg_val |= bit_mask;
ret = __adp5520_write(chip->client, reg, reg_val);
}
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __da903x_write(chip->client, reg, reg_val);
}
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ free_irq(client->irq, chip);
kfree(chip);
return 0;
}
*/
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
}
goto out;
}
- data &= mask;
+ data &= ~mask;
err = tps65910_i2c_write(tps65910, reg, 1, &data);
if (err)
dev_err(tps65910->dev, "write to reg %x failed\n", reg);
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/*
* [MSG1]: fill the register address data
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/* [MSG1] fill the register address data */
msg = &twl->xfer_msg[0];
u32 edge_change;
struct mutex irq_lock;
+ char *irq_name;
};
/*----------------------------------------------------------------------*/
* Generic handler for SIH interrupts ... we "know" this is called
* in task context, with IRQs enabled.
*/
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
{
struct sih_agent *agent = irq_get_handler_data(irq);
const struct sih *sih = agent->sih;
pr_err("twl4030: %s SIH, read ISR error %d\n",
sih->name, isr);
/* REVISIT: recover; eventually mask it all, etc */
- return;
+ return IRQ_HANDLED;
}
while (isr) {
pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
sih->name, irq);
}
+ return IRQ_HANDLED;
}
static unsigned twl4030_irq_next;
activate_irq(irq);
}
- status = irq_base;
twl4030_irq_next += i;
/* replace generic PIH handler (handle_simple_irq) */
irq = sih_mod + twl4030_irq_base;
irq_set_handler_data(irq, agent);
- irq_set_chained_handler(irq, handle_twl4030_sih);
+ agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+ status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+ agent->irq_name ?: sih->name, NULL);
pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
irq, irq_base, twl4030_irq_next - 1);
- return status;
+ return status < 0 ? status : irq_base;
}
/* FIXME need a call to reverse twl4030_sih_setup() ... */
}
/* install an irq handler to demultiplex the TWL4030 interrupt */
- status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
- "TWL4030-PIH", NULL);
+ status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+ IRQF_ONESHOT,
+ "TWL4030-PIH", NULL);
if (status < 0) {
pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
switch (wm8994->type) {
case WM8958:
+ case WM1811:
ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
if (ret < 0) {
dev_err(dev, "Failed to read power status: %d\n", ret);
/* Config1 register p.24 */
LEDS1 = (1 << 7),
LEDS0 = (1 << 6),
- MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
Speed_down = (1 << 4),
MEMMAP = (1 << 3),
IOMAP = (1 << 2),
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
};
/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
const struct rtl_cfg_info *cfg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
unsigned msi = 0;
u8 cfg2;
cfg2 = RTL_R8(Config2) & ~MSIEnable;
if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(pdev)) {
- dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
} else {
cfg2 |= MSIEnable;
msi = RTL_FEATURE_MSI;
}
}
- RTL_W8(Config2, cfg2);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
return msi;
}
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ tp->features |= rtl_try_msi(tp, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
if (rtl_tbi_enabled(tp)) {
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
/* handle completed packets */
+ spin_unlock_irqrestore(&chan->lock, flags);
do {
ret = __cpdma_chan_process(chan);
if (ret < 0)
break;
} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+ spin_lock_irqsave(&chan->lock, flags);
/* remaining packets haven't been tx/rx'ed, clean them up */
while (chan->head) {
// ASIX 88772a
USB_DEVICE(0x0db0, 0xa877),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ // Asus USB Ethernet Adapter
+ USB_DEVICE (0x0b95, 0x7e2b),
+ .driver_info = (unsigned long) &ax88772_info,
},
{ }, // END
};
ath_rc_priv->max_valid_rate = k;
ath_rc_sort_validrates(rate_table, ath_rc_priv);
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+ ath_rc_priv->rate_max_phy = (k > 4) ?
+ ath_rc_priv->valid_rate_index[k-4] :
+ ath_rc_priv->valid_rate_index[k-1];
ath_rc_priv->rate_table = rate_table;
ath_dbg(common, ATH_DBG_CONFIG,
if (ctx->ht.enabled) {
/* if HT40 is used, it should not change
* after association, except on channel switch */
- if (iwl_is_associated_ctx(ctx) &&
- !ctx->ht.is_40mhz)
+ if (!ctx->ht.is_40mhz ||
+ !iwl_is_associated_ctx(ctx))
iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
}
iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
int ret;
u8 sta_id;
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return 0;
+
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *ctx = vif_priv->ctx;
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return;
+
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
- if (is_agg)
- iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
- le16_to_cpu(tx_cmd->len));
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
- unsigned long cmd_pending_q_flags;
unsigned long scan_pending_q_flags;
uint16_t cancel_scan_cmd = false;
cmd_node = adapter->curr_cmd;
cmd_node->wait_q_enabled = false;
cmd_node->cmd_flag |= CMD_F_CANCELED;
- spin_lock_irqsave(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/*
ret = -ENODEV;
goto err0;
}
- dwc->revision = reg & DWC3_GSNPSREV_MASK;
+ dwc->revision = reg;
dwc3_core_soft_reset(dwc);
num_req_streams = ep_comp->bmAttributes & 0x1f;
if (num_req_streams > ep->max_streams)
return 0;
- /* Update the ep_comp descriptor if needed */
- if (num_req_streams != ep->max_streams)
- ep_comp->bmAttributes = ep->max_streams;
}
}
#include "isp1760-hcd.h"
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#endif
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct isp1760 {
struct usb_hcd *hcd;
int rst_gpio;
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
+ else if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
else
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
This directory is _NOT_ for adding arbitrary new firmware images. The
place to add those is the separate linux-firmware repository:
- git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
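For example, a working copy of that repository can be obtained with:
	git clone git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git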
That repository contains all these firmware images which have been
extracted from older drivers, as well as various new firmware images which
To submit firmware to that repository, please send either a git binary
diff or preferably a git pull request to:
David Woodhouse <dwmw2@infradead.org>
+ Ben Hutchings <ben@decadent.org.uk>
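A git binary diff of the new image can be produced with, for example:
	git diff --binary
(a plain text diff cannot carry the contents of binary firmware blobs).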
Your commit should include an update to the WHENCE file clearly
identifying the licence under which the firmware is available, and
struct list_head *fallback;
int ret;
-again:
spin_lock_irqsave(&workers->lock, flags);
+again:
worker = next_worker(workers);
if (!worker) {
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
ret = __btrfs_start_workers(workers);
+ spin_lock_irqsave(&workers->lock, flags);
if (ret)
goto fallback;
goto again;
int err = btrfs_add_link(trans, dir, inode,
dentry->d_name.name, dentry->d_name.len,
backref, index);
- if (!err) {
- d_instantiate(dentry, inode);
- return 0;
- }
if (err > 0)
err = -EEXIST;
return err;
else {
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
BUG_ON(err);
+ d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
drop_inode = 1;
out_unlock:
+ if (!err)
+ d_instantiate(dentry, inode);
nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
if (drop_inode) {
struct completion *done; /* set if the caller waits */
};
-const char *wb_reason_name[] = {
- [WB_REASON_BACKGROUND] = "background",
- [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages",
- [WB_REASON_SYNC] = "sync",
- [WB_REASON_PERIODIC] = "periodic",
- [WB_REASON_LAPTOP_TIMER] = "laptop_timer",
- [WB_REASON_FREE_MORE_MEM] = "free_more_memory",
- [WB_REASON_FS_FREE_SPACE] = "fs_free_space",
- [WB_REASON_FORKER_THREAD] = "forker_thread"
-};
-
/*
* Include the creation of the trace points after defining the
* wb_writeback_work structure so that the definition remains local to this
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
+#include <linux/cpu.h>
/* can make br locks by using local lock for read side, global lock for write */
#define br_lock_init(name) name##_lock_init()
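/*
 * Usage sketch, using this header's br_* wrappers (vfsmount_lock in
 * fs/namespace.c is an existing user):
 *
 *	DEFINE_BRLOCK(vfsmount_lock);
 *
 *	br_lock_init(vfsmount_lock);	// once, during subsystem init
 *	br_read_lock(vfsmount_lock);	// fast path: this cpu's lock only
 *	br_read_unlock(vfsmount_lock);
 *	br_write_lock(vfsmount_lock);	// slow path: lock every tracked cpu
 *	br_write_unlock(vfsmount_lock);
 */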
#define DEFINE_LGLOCK(name) \
\
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
DEFINE_LGLOCK_LOCKDEP(name); \
\
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+ unsigned long action, void *hcpu) \
+ { \
+ switch (action & ~CPU_TASKS_FROZEN) { \
+ case CPU_UP_PREPARE: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_set((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ break; \
+ case CPU_UP_CANCELED: case CPU_DEAD: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_clear((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+ .notifier_call = name##_lg_cpu_callback, \
+ }; \
void name##_lock_init(void) { \
int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
lock = &per_cpu(name##_lock, i); \
*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
} \
+ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+ get_online_cpus(); \
+ for_each_online_cpu(i) \
+ cpu_set(i, name##_cpus); \
+ put_online_cpus(); \
} \
EXPORT_SYMBOL(name##_lock_init); \
\
\
void name##_global_lock_online(void) { \
int i; \
- preempt_disable(); \
+ spin_lock(&name##_cpu_lock); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
void name##_global_unlock_online(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
} \
- preempt_enable(); \
+ spin_unlock(&name##_cpu_lock); \
} \
EXPORT_SYMBOL(name##_global_unlock_online); \
\
#define DST_NOHASH 0x0008
#define DST_NOCACHE 0x0010
#define DST_NOCOUNT 0x0020
+#define DST_NOPEER 0x0040
short error;
short obsolete;
u8 dir, flow_resolve_t resolver, void *ctx);
extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
extern atomic_t flow_cache_genid;
#endif
* bits is an indicator of when to send a window update SACK.
*/
int rwnd_update_shift;
+
+ /* Threshold for autoclose timeout, in seconds. */
+ unsigned long max_autoclose;
} sctp_globals;
#define sctp_rto_initial (sctp_globals.rto_initial)
#define sctp_auth_enable (sctp_globals.auth_enable)
#define sctp_checksum_disable (sctp_globals.checksum_disable)
#define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose (sctp_globals.max_autoclose)
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
/*
* Take into account size of receive queue and backlog queue
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can get through.
*/
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
- return qsize + skb->truesize > sk->sk_rcvbuf;
+ return qsize > sk->sk_rcvbuf;
}
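/*
 * Example, for illustration: with sk_rcvbuf at 256KB, a single skb of
 * truesize 300KB always failed the old "qsize + skb->truesize >
 * sk_rcvbuf" test, even on empty queues; the relaxed test above lets
 * it in whenever the queues are not already over the limit.
 */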
/* The per-socket spinlock must be held here. */
{I_REFERENCED, "I_REFERENCED"} \
)
+#define WB_WORK_REASON \
+ {WB_REASON_BACKGROUND, "background"}, \
+ {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
+ {WB_REASON_SYNC, "sync"}, \
+ {WB_REASON_PERIODIC, "periodic"}, \
+ {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
+ {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
+ {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
+ {WB_REASON_FORKER_THREAD, "forker_thread"}
+
struct wb_writeback_work;
DECLARE_EVENT_CLASS(writeback_work_class,
__entry->for_kupdate,
__entry->range_cyclic,
__entry->for_background,
- wb_reason_name[__entry->reason]
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
- wb_reason_name[__entry->reason])
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
+ )
);
TRACE_EVENT(global_dirty_state,
page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+ err = add_to_page_cache_lru(page, mapping, index, gfp);
if (unlikely(err)) {
page_cache_release(page);
if (err == -EEXIST)
* @gfp: the page allocator flags to use if allocating
*
* This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
*/
goto encrypt;
auth:
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
return 0;
if (!hci_conn_auth(conn, sec_level, auth_type))
void *ptr = req->data;
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
}
}
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC option.
+ */
+ rfc.mode = chan->mode;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+ BT_ERR("Expected RFC option was not found, using defaults");
+
done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
if (list_empty(&s->dlcs)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
+ rfcomm_session_clear_timer(s);
}
break;
return NULL;
}
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+ return dst->dev->mtu;
+}
+
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
+ .mtu = fake_mtu,
};
/*
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM;
+ rt->dst.flags = DST_NOXFRM | DST_NOPEER;
rt->dst.ops = &fake_dst_ops;
}
put_online_cpus();
}
+static void flow_cache_flush_task(struct work_struct *work)
+{
+ flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+ schedule_work(&flow_cache_flush_work);
+}
+
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
if (count) {
int i;
- if (count > 1<<30) {
+ if (count > INT_MAX)
+ return -EINVAL;
+ count = roundup_pow_of_two(count);
+ if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+ / sizeof(struct rps_dev_flow)) {
/* Enforce a limit to prevent overflow */
return -EINVAL;
}
- count = roundup_pow_of_two(count);
table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
if (!table)
return -ENOMEM;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
- /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
- number of warnings when compiling with -W --ANK
- */
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
atomic_inc(&sk->sk_drops);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
}
}
+ /* no point in waiting if we could not bring up at least one device */
+ if (!ic_first_dev)
+ goto have_carrier;
+
/* wait for a carrier on at least one device */
start = jiffies;
while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int rt_chain_length_max __read_mostly = 20;
static int redirect_genid;
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
/*
* Interface to generic destination cache.
*/
return ONE;
}
+static void rt_check_expire(void)
+{
+ static unsigned int rover;
+ unsigned int i = rover, goal;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
+ unsigned long samples = 0;
+ unsigned long sum = 0, sum2 = 0;
+ unsigned long delta;
+ u64 mult;
+
+ delta = jiffies - expires_ljiffies;
+ expires_ljiffies = jiffies;
+ mult = ((u64)delta) << rt_hash_log;
+ if (ip_rt_gc_timeout > 1)
+ do_div(mult, ip_rt_gc_timeout);
+ goal = (unsigned int)mult;
+ if (goal > rt_hash_mask)
+ goal = rt_hash_mask + 1;
+ for (; goal > 0; goal--) {
+ unsigned long tmo = ip_rt_gc_timeout;
+ unsigned long length;
+
+ i = (i + 1) & rt_hash_mask;
+ rthp = &rt_hash_table[i].chain;
+
+ if (need_resched())
+ cond_resched();
+
+ samples++;
+
+ if (rcu_dereference_raw(*rthp) == NULL)
+ continue;
+ length = 0;
+ spin_lock_bh(rt_hash_lock_addr(i));
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+ prefetch(rth->dst.rt_next);
+ if (rt_is_expired(rth)) {
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ continue;
+ }
+ if (rth->dst.expires) {
+ /* Entry is expired even if it is in use */
+ if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+ tmo >>= 1;
+ rthp = &rth->dst.rt_next;
+ /*
+ * We only count entries on
+ * a chain with equal hash inputs once
+ * so that entries for different QOS
+ * levels, and other non-hash input
+ * attributes don't unfairly skew
+ * the length computation
+ */
+ length += has_noalias(rt_hash_table[i].chain, rth);
+ continue;
+ }
+ } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+ goto nofree;
+
+ /* Cleanup aged off entries. */
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ }
+ spin_unlock_bh(rt_hash_lock_addr(i));
+ sum += length;
+ sum2 += length*length;
+ }
+ if (samples) {
+ unsigned long avg = sum / samples;
+ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+ rt_chain_length_max = max_t(unsigned long,
+ ip_rt_gc_elasticity,
+ (avg + 4*sd) >> FRACT_BITS);
+ }
+ rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+ rt_check_expire();
+ schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
/*
* Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
{
struct rtable *rt = (struct rtable *) dst;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
if (rt->peer == NULL)
rt_bind_peer(rt, rt->rt_dst, 1);
iph->id = htons(inet_getid(rt->peer, more));
return;
}
- } else
+ } else if (!rt)
printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
__builtin_return_address(0));
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+ {
+ .procname = "gc_interval",
+ .data = &ip_rt_gc_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
devinet_init();
ip_fib_init();
+ INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+ expires_ljiffies = jiffies;
+ schedule_delayed_work(&expires_work,
+ net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
static atomic_t ipv6_fragmentation_id;
int old, new;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
struct inet_peer *peer;
if (!rt->rt6i_peer)
copied += used;
len -= used;
+ /* For non-stream protocols we get one packet per recvmsg call */
+ if (sk->sk_type != SOCK_STREAM)
+ goto copy_uaddr;
+
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, 0);
*seq = 0;
}
- /* For non stream protcols we get one packet per recvmsg call */
- if (sk->sk_type != SOCK_STREAM)
- goto copy_uaddr;
-
/* Partial read */
if (used + offset < skb->len)
continue;
}
if (llc_sk(sk)->cmsg_flags)
llc_cmsg_rcv(msg, skb);
+
+ if (!(flags & MSG_PEEK)) {
+ sk_eat_skb(sk, skb, 0);
+ *seq = 0;
+ }
+
goto out;
}
break;
}
- if (sinfo->count.to)
+ if (sinfo->count.to >= sinfo->count.from)
return what <= sinfo->count.to && what >= sinfo->count.from;
- else
- return what >= sinfo->count.from;
+ else /* inverted */
+ return what < sinfo->count.to || what > sinfo->count.from;
}
static int connbytes_mt_check(const struct xt_mtchk_param *par)
__u32 timeout)
{
int rc = 0;
- unsigned long completion_rc;
+ long completion_rc;
ndev->req_status = NCI_REQ_PEND;
if (snaplen > res)
snaplen = res;
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
- atomic_read(&sk->sk_rmem_alloc) + skb->truesize
- < (unsigned)sk->sk_rcvbuf) {
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
if (!netif_is_multiqueue(dev))
return -EOPNOTSUPP;
- if (nla_len(opt) < sizeof(*qopt))
+ if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- (unsigned long)sp->autoclose * HZ;
+ min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
/* Keep track of how many bytes are in flight to the receiver. */
asoc->outqueue.outstanding_bytes += datasize;
- /* Update our view of the receiver's rwnd. Include sk_buff overhead
- * while updating peer.rwnd so that it reduces the chances of a
- * receiver running out of receive buffer space even when receive
- * window is still open. This can happen when a sender is sending
- * sending small messages.
- */
- datasize += sizeof(struct sk_buff);
+ /* Update our view of the receiver's rwnd. */
if (datasize < rwnd)
rwnd -= datasize;
else
chunk->transport->flight_size -=
sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
}
continue;
}
* (Section 7.2.4)), add the data size of those
* chunks to the rwnd.
*/
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
if (chunk->transport)
transport->flight_size -= sctp_data_size(chunk);
sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
+ /* Initialize maximum autoclose timeout. */
+ sctp_max_autoclose = INT_MAX / HZ;
+
/* Initialize handle used for association ids. */
idr_init(&sctp_assocs_id);
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
return 0;
}
static int sack_timer_max = 500;
static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
.extra1 = &one,
.extra2 = &rwnd_scale_max,
},
+ {
+ .procname = "max_autoclose",
+ .data = &sctp_max_autoclose,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .extra1 = &max_autoclose_min,
+ .extra2 = &max_autoclose_max,
+ },
{ /* sentinel */ }
};
{
struct dst_entry *head, *next;
- flow_cache_flush();
-
spin_lock_bh(&xfrm_policy_sk_bundle_lock);
head = xfrm_policy_sk_bundles;
xfrm_policy_sk_bundles = NULL;
}
}
+static void xfrm_garbage_collect(struct net *net)
+{
+ flow_cache_flush();
+ __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+ flow_cache_flush_deferred();
+ __xfrm_garbage_collect(net);
+}
+
static void xfrm_init_pmtu(struct dst_entry *dst)
{
do {
if (likely(dst_ops->neigh_lookup == NULL))
dst_ops->neigh_lookup = xfrm_neigh_lookup;
if (likely(afinfo->garbage_collect == NULL))
- afinfo->garbage_collect = __xfrm_garbage_collect;
+ afinfo->garbage_collect = xfrm_garbage_collect_deferred;
xfrm_policy_afinfo[afinfo->family] = afinfo;
}
write_unlock_bh(&xfrm_policy_afinfo_lock);
switch (event) {
case NETDEV_DOWN:
- __xfrm_garbage_collect(dev_net(dev));
+ xfrm_garbage_collect(dev_net(dev));
}
return NOTIFY_DONE;
}
--directory=$(srctree) --directory=$(objtree) \
--output $(obj)/config.pot
$(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
- $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
- $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \
+ $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \
+ $(srctree)/arch/*/um/Kconfig`; \
do \
echo " GEN $$i"; \
$(obj)/kxgettext $$i \
done )
$(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
--output $(obj)/linux.pot
- $(Q)rm -f $(srctree)/arch/um/Kconfig
$(Q)rm -f $(obj)/config.pot
PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
/* AC97 v2.2 specifications says minimum 1 us. */
udelay(2);
gpio_set_value(chip->reset_pin, 1);
+ } else {
+ ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+ udelay(2);
+ ac97c_writel(chip, MR, AC97C_MR_ENA);
}
}