Merge branch 'stable-3.2' into pandora-3.2
author Grazvydas Ignotas <notasas@gmail.com>
Sun, 15 May 2016 12:42:35 +0000 (15:42 +0300)
committer Grazvydas Ignotas <notasas@gmail.com>
Sun, 15 May 2016 12:42:35 +0000 (15:42 +0300)
drivers/mtd/ubi/upd.c
fs/exec.c
fs/open.c
fs/splice.c
kernel/sched.c
kernel/trace/trace.c
mm/memory.c
net/mac80211/rc80211_minstrel_ht.c

diff --combined drivers/mtd/ubi/upd.c
@@@ -151,7 -151,7 +151,7 @@@ int ubi_start_update(struct ubi_device 
        }
  
        if (bytes == 0) {
 -              err = ubi_wl_flush(ubi);
 +              err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
                if (err)
                        return err;
  
@@@ -188,14 -188,16 +188,14 @@@ int ubi_start_leb_change(struct ubi_dev
        dbg_gen("start changing LEB %d:%d, %u bytes",
                vol->vol_id, req->lnum, req->bytes);
        if (req->bytes == 0)
 -              return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
 -                                               req->dtype);
 +              return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
  
        vol->upd_bytes = req->bytes;
        vol->upd_received = 0;
        vol->changing_leb = 1;
        vol->ch_lnum = req->lnum;
 -      vol->ch_dtype = req->dtype;
  
-       vol->upd_buf = vmalloc(req->bytes);
+       vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
        if (!vol->upd_buf)
                return -ENOMEM;
  
@@@ -246,7 -248,8 +246,7 @@@ static int write_leb(struct ubi_device 
                        return 0;
                }
  
 -              err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
 -                                      UBI_UNKNOWN);
 +              err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
        } else {
                /*
                 * When writing static volume, and this is the last logical
                 * contain zeros, not random trash.
                 */
                memset(buf + len, 0, vol->usable_leb_size - len);
 -              err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
 -                                         UBI_UNKNOWN, used_ebs);
 +              err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
        }
  
        return err;
@@@ -363,7 -367,7 +363,7 @@@ int ubi_more_update_data(struct ubi_dev
  
        ubi_assert(vol->upd_received <= vol->upd_bytes);
        if (vol->upd_received == vol->upd_bytes) {
 -              err = ubi_wl_flush(ubi);
 +              err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
                if (err)
                        return err;
                /* The update is finished, clear the update marker */
@@@ -419,7 -423,7 +419,7 @@@ int ubi_more_leb_change_data(struct ubi
                       len - vol->upd_bytes);
                len = ubi_calc_data_len(ubi, vol->upd_buf, len);
                err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
 -                                              vol->upd_buf, len, UBI_UNKNOWN);
 +                                              vol->upd_buf, len);
                if (err)
                        return err;
        }
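
[Note: the ` +`/` -` hunks above track pandora's backport of the newer UBI API — ubi_wl_flush() now takes vol_id/lnum filters, with UBI_ALL meaning "flush everything", and the dtype hints are dropped from the ubi_eba_* helpers — while the `+ ` line is the stable-3.2 fix that rounds the update buffer up to the flash's minimal I/O unit, so the final partial write can be padded inside the buffer. A minimal sketch of the rounding, assuming the kernel's ALIGN() semantics and an illustrative 2 KiB NAND page:

  /* ALIGN(x, a) rounds x up to the next multiple of a (a power of two). */
  #define ALIGN(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))

  /* With ubi->min_io_size == 2048 and a 5000-byte update,
   * ALIGN(5000, 2048) == 6144: the 1144 padding bytes of the final
   * flash page fit inside the buffer instead of overrunning it. */
]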
diff --combined fs/exec.c
+++ b/fs/exec.c
@@@ -55,6 -55,9 +55,9 @@@
  #include <linux/pipe_fs_i.h>
  #include <linux/oom.h>
  #include <linux/compat.h>
+ #include <linux/sched.h>
+ #include <linux/fs.h>
+ #include <linux/path.h>
  
  #include <asm/uaccess.h>
  #include <asm/mmu_context.h>
@@@ -2115,8 -2118,8 +2118,8 @@@ static int umh_pipe_setup(struct subpro
        fd_install(0, rp);
        spin_lock(&cf->file_lock);
        fdt = files_fdtable(cf);
 -      FD_SET(0, fdt->open_fds);
 -      FD_CLR(0, fdt->close_on_exec);
 +      __set_open_fd(0, fdt);
 +      __clear_close_on_exec(0, fdt);
        spin_unlock(&cf->file_lock);
  
        /* and disallow core files too */
@@@ -2246,6 -2249,8 +2249,8 @@@ void do_coredump(long signr, int exit_c
                }
        } else {
                struct inode *inode;
+               int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
+                                O_LARGEFILE | O_EXCL;
  
                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;
                 * what matters is that at least one of the two processes
                 * writes its coredump successfully, not which one.
                 */
-               cprm.file = filp_open(cn.corename,
-                                O_CREAT | 2 | O_NOFOLLOW |
-                                O_LARGEFILE | O_EXCL,
-                                0600);
+               if (need_suid_safe) {
+                       /*
+                        * Using user namespaces, normal user tasks can change
+                        * their current->fs->root to point to arbitrary
+                        * directories. Since the intention of the "only dump
+                        * with a fully qualified path" rule is to control where
+                        * coredumps may be placed using root privileges,
+                        * current->fs->root must not be used. Instead, use the
+                        * root directory of init_task.
+                        */
+                       struct path root;
+                       task_lock(&init_task);
+                       get_fs_root(init_task.fs, &root);
+                       task_unlock(&init_task);
+                       cprm.file = file_open_root(root.dentry, root.mnt,
+                               cn.corename, open_flags, 0600);
+                       path_put(&root);
+               } else {
+                       cprm.file = filp_open(cn.corename, open_flags, 0600);
+               }
                if (IS_ERR(cprm.file))
                        goto fail_unlock;
  
diff --combined fs/open.c
+++ b/fs/open.c
@@@ -60,7 -60,6 +60,7 @@@ int do_truncate(struct dentry *dentry, 
        mutex_unlock(&dentry->d_inode->i_mutex);
        return ret;
  }
 +EXPORT_SYMBOL(do_truncate);
  
  static long do_sys_truncate(const char __user *pathname, loff_t length)
  {
@@@ -837,7 -836,7 +837,7 @@@ EXPORT_SYMBOL(dentry_open)
  static void __put_unused_fd(struct files_struct *files, unsigned int fd)
  {
        struct fdtable *fdt = files_fdtable(files);
 -      __FD_CLR(fd, fdt->open_fds);
 +      __clear_open_fd(fd, fdt);
        if (fd < files->next_fd)
                files->next_fd = fd;
  }
@@@ -959,12 -958,10 +959,10 @@@ struct file *filp_open(const char *file
  EXPORT_SYMBOL(filp_open);
  
  struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
-                           const char *filename, int flags)
+                           const char *filename, int flags, umode_t mode)
  {
        struct open_flags op;
-       int lookup = build_open_flags(flags, 0, &op);
-       if (flags & O_CREAT)
-               return ERR_PTR(-EINVAL);
+       int lookup = build_open_flags(flags, mode, &op);
        if (!filename && (flags & O_DIRECTORY))
                if (!dentry->d_inode->i_op->lookup)
                        return ERR_PTR(-ENOTDIR);
@@@ -1082,7 -1079,7 +1080,7 @@@ SYSCALL_DEFINE1(close, unsigned int, fd
        if (!filp)
                goto out_unlock;
        rcu_assign_pointer(fdt->fd[fd], NULL);
 -      FD_CLR(fd, fdt->close_on_exec);
 +      __clear_close_on_exec(fd, fdt);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
        retval = filp_close(filp, files);
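
[Note: besides the fd-bitmap helper rename, file_open_root() grows a umode_t parameter and drops its blanket rejection of O_CREAT — exactly what the do_coredump() hunk above relies on. The bitmap helpers are the mainline 3.4-era accessors; there they reduce to plain bitops on the fdtable's open_fds/close_on_exec bitmaps, roughly as below (a sketch — the backport in this tree may differ in field types):

  static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
  {
          __set_bit(fd, fdt->open_fds);
  }

  static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
  {
          __clear_bit(fd, fdt->open_fds);
  }

  static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
  {
          __clear_bit(fd, fdt->close_on_exec);
  }
]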
diff --combined fs/splice.c
@@@ -188,6 -188,9 +188,9 @@@ ssize_t splice_to_pipe(struct pipe_inod
        unsigned int spd_pages = spd->nr_pages;
        int ret, do_wakeup, page_nr;
  
+       if (!spd_pages)
+               return 0;
        ret = 0;
        do_wakeup = 0;
        page_nr = 0;
@@@ -1124,8 -1127,8 +1127,8 @@@ EXPORT_SYMBOL(generic_splice_sendpage)
  /*
   * Attempt to initiate a splice from pipe to file.
   */
 -static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
 -                         loff_t *ppos, size_t len, unsigned int flags)
 +long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
 +                  loff_t *ppos, size_t len, unsigned int flags)
  {
        ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
                                loff_t *, size_t, unsigned int);
  
        return splice_write(pipe, out, ppos, len, flags);
  }
 +EXPORT_SYMBOL(do_splice_from);
  
  /*
   * Attempt to initiate a splice from a file to a pipe.
   */
 -static long do_splice_to(struct file *in, loff_t *ppos,
 -                       struct pipe_inode_info *pipe, size_t len,
 -                       unsigned int flags)
 +long do_splice_to(struct file *in, loff_t *ppos,
 +                struct pipe_inode_info *pipe, size_t len,
 +                unsigned int flags)
  {
        ssize_t (*splice_read)(struct file *, loff_t *,
                               struct pipe_inode_info *, size_t, unsigned int);
  
        return splice_read(in, ppos, pipe, len, flags);
  }
 +EXPORT_SYMBOL(do_splice_to);
  
  /**
   * splice_direct_to_actor - splices data directly between two non-pipes
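
[Note: two things land in fs/splice.c — a stable-3.2 early exit from splice_to_pipe() when the page descriptor is empty (its caller-side counterpart appears in kernel/trace/trace.c below), and the pandora-side un-static'ing plus EXPORT_SYMBOL of do_splice_from()/do_splice_to() so modules can drive pipe splicing directly. The guard matters because the transfer loop assumes at least one page; with the fix an empty descriptor is simply "0 bytes spliced". A minimal caller sketch, arrays and ops illustrative:

  struct page *pages[PIPE_DEF_BUFFERS];
  struct partial_page partial[PIPE_DEF_BUFFERS];
  struct splice_pipe_desc spd = {
          .pages    = pages,
          .partial  = partial,
          .nr_pages = 0,          /* nothing was collected */
          .flags    = 0,
          .ops      = &default_pipe_buf_ops,
  };

  /* previously this could enter the transfer loop with no data;
   * with the guard it returns 0 immediately */
  ssize_t ret = splice_to_pipe(pipe, &spd);
]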
diff --combined kernel/sched.c
@@@ -2084,6 -2084,19 +2084,19 @@@ EXPORT_SYMBOL_GPL(account_system_vtime)
  
  #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
  
+ static inline void account_reset_rq(struct rq *rq)
+ {
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+       rq->prev_irq_time = 0;
+ #endif
+ #ifdef CONFIG_PARAVIRT
+       rq->prev_steal_time = 0;
+ #endif
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+       rq->prev_steal_time_rq = 0;
+ #endif
+ }
  #ifdef CONFIG_PARAVIRT
  static inline u64 steal_ticks(u64 steal)
  {
@@@ -5304,7 -5317,6 +5317,7 @@@ int can_nice(const struct task_struct *
        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
  }
 +EXPORT_SYMBOL_GPL(can_nice);
  
  #ifdef __ARCH_WANT_SYS_NICE
  
@@@ -6852,6 -6864,7 +6865,7 @@@ migration_call(struct notifier_block *n
  
        case CPU_UP_PREPARE:
                rq->calc_load_update = calc_load_update;
+               account_reset_rq(rq);
                break;
  
        case CPU_ONLINE:
@@@ -8500,7 -8513,6 +8514,7 @@@ void __init sched_init(void
  #ifdef CONFIG_CGROUP_SCHED
        list_add(&root_task_group.list, &task_groups);
        INIT_LIST_HEAD(&root_task_group.children);
 +      INIT_LIST_HEAD(&root_task_group.siblings);
        autogroup_init(&init_task);
  #endif /* CONFIG_CGROUP_SCHED */
  
diff --combined kernel/trace/trace.c
@@@ -3565,7 -3565,10 +3565,10 @@@ static ssize_t tracing_splice_read_pipe
  
        spd.nr_pages = i;
  
-       ret = splice_to_pipe(pipe, &spd);
+       if (i)
+               ret = splice_to_pipe(pipe, &spd);
+       else
+               ret = 0;
  out:
        splice_shrink_spd(&spd);
        return ret;
@@@ -4436,7 -4439,7 +4439,7 @@@ static const struct file_operations tra
  };
  
  struct dentry *trace_create_file(const char *name,
 -                               mode_t mode,
 +                               umode_t mode,
                                 struct dentry *parent,
                                 void *data,
                                 const struct file_operations *fops)
diff --combined mm/memory.c
@@@ -1403,7 -1403,6 +1403,7 @@@ unsigned long zap_page_range(struct vm_
        tlb_finish_mmu(&tlb, address, end);
        return end;
  }
 +EXPORT_SYMBOL_GPL(zap_page_range);
  
  /**
   * zap_vma_ptes - remove ptes mapping the vma
@@@ -3559,9 -3558,8 +3559,9 @@@ retry
  
                barrier();
                if (pmd_trans_huge(orig_pmd)) {
 -                      if (flags & FAULT_FLAG_WRITE &&
 -                          !pmd_write(orig_pmd) &&
 +                      unsigned int dirty = flags & FAULT_FLAG_WRITE;
 +
 +                      if (dirty && !pmd_write(orig_pmd) &&
                            !pmd_trans_splitting(orig_pmd)) {
                                ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
                                                          orig_pmd);
                                if (unlikely(ret & VM_FAULT_OOM))
                                        goto retry;
                                return ret;
 +                      } else {
 +                              huge_pmd_set_accessed(mm, vma, address, pmd,
 +                                                    orig_pmd, dirty);
                        }
                        return 0;
                }
         */
        if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
                return VM_FAULT_OOM;
-       /* if an huge pmd materialized from under us just retry later */
-       if (unlikely(pmd_trans_huge(*pmd)))
+       /*
+        * If a huge pmd materialized under us just retry later.  Use
+        * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
+        * didn't become pmd_trans_huge under us and then back to pmd_none, as
+        * a result of MADV_DONTNEED running immediately after a huge pmd fault
+        * in a different thread of this mm, in turn leading to a misleading
+        * pmd_trans_huge() retval.  All we have to ensure is that it is a
+        * regular pmd that we can walk with pte_offset_map() and we can do that
+        * through an atomic read in C, which is what pmd_trans_unstable()
+        * provides.
+        */
+       if (unlikely(pmd_trans_unstable(pmd)))
                return 0;
        /*
         * A regular pmd is established and it can't morph into a huge pmd
@@@ -3880,11 -3885,7 +3890,11 @@@ static int __access_remote_vm(struct ta
                        vma = find_vma(mm, addr);
                        if (!vma || vma->vm_start > addr)
                                break;
 -                      if (vma->vm_ops && vma->vm_ops->access)
 +                      if ((vma->vm_flags & VM_PFNMAP) &&
 +                          !(vma->vm_flags & VM_IO))
 +                              ret = generic_access_phys(vma, addr, buf,
 +                                                        len, write);
 +                      if (ret <= 0 && vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
                                                          len, write);
                        if (ret <= 0)
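
[Note: three changes meet in mm/memory.c — zap_page_range() gains a pandora-side export, a huge-pmd fault that needs no write-protect handling now sets the accessed/dirty bits via huge_pmd_set_accessed() instead of doing nothing, and the final pmd check becomes pmd_trans_unstable() for the MADV_DONTNEED race the new comment explains. That race as an interleaving sketch (names from the hunk above):

  /* fault thread                       other thread, same mm
   *
   * orig_pmd = *pmd; barrier();
   * pmd_trans_huge(orig_pmd) == false
   *                                    huge fault: pmd none -> trans huge
   *                                    MADV_DONTNEED: trans huge -> none
   * pmd_trans_huge(*pmd) == false      (was transiently true in between)
   * pte_offset_map(pmd, address)       -> walks a pmd that never became
   *                                       a regular pte table
   *
   * pmd_trans_unstable(pmd) reads the pmd atomically and treats the
   * none/huge/unstable states as "retry later", closing the window. */

The __access_remote_vm() hunk additionally lets ptrace-style access go through generic_access_phys() for VM_PFNMAP (but not VM_IO) mappings, falling back to vm_ops->access as before.]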
diff --combined net/mac80211/rc80211_minstrel_ht.c
  /* Transmit duration for the raw data part of an average sized packet */
  #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
  
 +/*
 + * Define group sort order: HT40 -> SGI -> #streams
 + */
 +#define GROUP_IDX(_streams, _sgi, _ht40)      \
 +      MINSTREL_MAX_STREAMS * 2 * _ht40 +      \
 +      MINSTREL_MAX_STREAMS * _sgi +           \
 +      _streams - 1
 +
  /* MCS rate information for an MCS group */
 -#define MCS_GROUP(_streams, _sgi, _ht40) {                            \
 +#define MCS_GROUP(_streams, _sgi, _ht40)                              \
 +      [GROUP_IDX(_streams, _sgi, _ht40)] = {                          \
        .streams = _streams,                                            \
        .flags =                                                        \
                (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |                 \
@@@ -67,9 -58,6 +67,9 @@@
   * To enable sufficiently targeted rate sampling, MCS rates are divided into
   * groups, based on the number of streams and flags (HT40, SGI) that they
   * use.
 + *
 + * Sortorder has to be fixed for GROUP_IDX macro to be applicable:
 + * HT40 -> SGI -> #streams
   */
  const struct mcs_group minstrel_mcs_groups[] = {
        MCS_GROUP(1, 0, 0),
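
[Note: GROUP_IDX() makes the table index computable from (streams, sgi, ht40), which is what lets minstrel_ht_get_group_idx() below drop its linear search for an O(1) expression. It only holds while minstrel_mcs_groups[] keeps the fixed HT40 -> SGI -> #streams order, hence the comment. Worked out, assuming MINSTREL_MAX_STREAMS == 3 as in this tree's rc80211_minstrel_ht.h:

  /* GROUP_IDX(s, sgi, ht40) == 6*ht40 + 3*sgi + (s - 1), so:
   *
   *   GROUP_IDX(1, 0, 0) ==  0   1 stream,  long GI,  HT20
   *   GROUP_IDX(2, 1, 0) ==  4   2 streams, short GI, HT20
   *   GROUP_IDX(3, 1, 1) == 11   3 streams, short GI, HT40
   *
   * matching positions 0, 4 and 11 in minstrel_mcs_groups[]. */
  idx = GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
                  !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
                  !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
]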
@@@ -114,9 -102,21 +114,9 @@@ minstrel_ewma(int old, int new, int wei
  static int
  minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
  {
 -      int streams = (rate->idx / MCS_GROUP_RATES) + 1;
 -      u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
 -      int i;
 -
 -      for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
 -              if (minstrel_mcs_groups[i].streams != streams)
 -                      continue;
 -              if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
 -                      continue;
 -
 -              return i;
 -      }
 -
 -      WARN_ON(1);
 -      return 0;
 +      return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
 +                       !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
 +                       !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
  }
  
  static inline struct minstrel_rate_stats *
@@@ -130,7 -130,7 +130,7 @@@ minstrel_get_ratestats(struct minstrel_
   * Recalculate success probabilities and counters for a rate using EWMA
   */
  static void
 -minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
 +minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
  {
        if (unlikely(mr->attempts > 0)) {
                mr->sample_skipped = 0;
   * the expected number of retransmissions and their expected length
   */
  static void
 -minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 -                    int group, int rate)
 +minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
  {
        struct minstrel_rate_stats *mr;
        unsigned int usecs;
@@@ -225,8 -226,8 +225,8 @@@ minstrel_ht_update_stats(struct minstre
                        mr = &mg->rates[i];
                        mr->retry_updated = false;
                        index = MCS_GROUP_RATES * group + i;
 -                      minstrel_calc_rate_ewma(mp, mr);
 -                      minstrel_ht_calc_tp(mp, mi, group, i);
 +                      minstrel_calc_rate_ewma(mr);
 +                      minstrel_ht_calc_tp(mi, group, i);
  
                        if (!mr->cur_tp)
                                continue;
  static bool
  minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
  {
 -      if (!rate->count)
 +      if (rate->idx < 0)
                return false;
  
 -      if (rate->idx < 0)
 +      if (!rate->count)
                return false;
  
        return !!(rate->flags & IEEE80211_TX_RC_MCS);
@@@ -356,7 -357,7 +356,7 @@@ minstrel_downgrade_rate(struct minstrel
  }
  
  static void
 -minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
 +minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
  {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
        if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
                return;
  
-       ieee80211_start_tx_ba_session(pubsta, tid, 5000);
+       ieee80211_start_tx_ba_session(pubsta, tid, 0);
  }
  
  static void
@@@ -454,7 -455,7 +454,7 @@@ minstrel_ht_tx_status(void *priv, struc
        if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
                minstrel_ht_update_stats(mp, mi);
                if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
 -                      minstrel_aggr_check(mp, sta, skb);
 +                      minstrel_aggr_check(sta, skb);
        }
  }
  
@@@ -514,6 -515,7 +514,6 @@@ minstrel_calc_retransmit(struct minstre
  static void
  minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                       struct ieee80211_tx_rate *rate, int index,
 -                     struct ieee80211_tx_rate_control *txrc,
                       bool sample, bool rtscts)
  {
        const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
@@@ -567,13 -569,6 +567,13 @@@ minstrel_get_sample_rate(struct minstre
        sample_idx += mi->sample_group * MCS_GROUP_RATES;
        minstrel_next_sample_idx(mi);
  
 +      /*
 +       * Sampling might add some overhead (RTS, no aggregation)
 +       * to the frame. Hence, don't use sampling for the currently
 +       * used max TP rate.
 +       */
 +      if (sample_idx == mi->max_tp_rate)
 +              return -1;
        /*
         * When not using MRR, do not sample if the probability is already
         * higher than 95% to avoid wasting airtime
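
[Note: the new early return keeps minstrel from "sampling" the rate it already uses as the max-throughput rate — a sampling frame forgoes aggregation and may add RTS overhead, and probing the current best rate yields no new information. Together with the pre-existing 95% probability cut-off described just above, this bounds the sampling overhead. The two cut-offs side by side, as a sketch (fixed-point via MINSTREL_FRAC, as elsewhere in minstrel; the exact surrounding code may differ):

  if (sample_idx == mi->max_tp_rate)
          return -1;              /* pure overhead, nothing to learn */

  if (!mp->has_mrr &&
      mr->probability > MINSTREL_FRAC(95, 100))
          return -1;              /* already confident enough */
]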
@@@ -633,11 -628,11 +633,11 @@@ minstrel_ht_get_rate(void *priv, struc
        if (sample_idx >= 0) {
                sample = true;
                minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
 -                      txrc, true, false);
 +                      true, false);
                info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
                minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
 -                      txrc, false, false);
 +                      false, false);
        }
  
        if (mp->hw->max_rates >= 3) {
                 */
                if (sample_idx >= 0)
                        minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
 -                              txrc, false, false);
 +                              false, false);
                else
                        minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
 -                              txrc, false, true);
 +                              false, true);
  
                minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
 -                                   txrc, false, !sample);
 +                                   false, !sample);
  
                ar[3].count = 0;
                ar[3].idx = -1;
                 * max_tp_rate -> max_prob_rate by default.
                 */
                minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
 -                                   txrc, false, !sample);
 +                                   false, !sample);
  
                ar[2].count = 0;
                ar[2].idx = -1;
@@@ -699,7 -694,6 +699,7 @@@ minstrel_ht_update_caps(void *priv, str
        int ack_dur;
        int stbc;
        int i;
 +      unsigned int smps;
  
        /* fall back to the old minstrel for legacy stations */
        if (!sta->ht_cap.ht_supported)
            oper_chan_type != NL80211_CHAN_HT40PLUS)
                sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
  
 +      smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
 +              IEEE80211_HT_CAP_SM_PS_SHIFT;
 +
        for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
                u16 req = 0;
  
                if ((sta_cap & req) != req)
                        continue;
  
 +              /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
 +              if (smps == WLAN_HT_CAP_SM_PS_STATIC &&
 +                  minstrel_mcs_groups[i].streams > 1)
 +                      continue;
 +
                mi->groups[i].supported =
                        mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
  
@@@ -817,7 -803,7 +817,7 @@@ minstrel_ht_alloc_sta(void *priv, struc
                        max_rates = sband->n_bitrates;
        }
  
 -      msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
 +      msp = kzalloc(sizeof(*msp), gfp);
        if (!msp)
                return NULL;