/* How many pages do we try to swap or page in/out together? */
int page_cluster;
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+ if (PageLRU(page)) {
+ unsigned long flags;
+ struct zone *zone = page_zone(page);
+
+ spin_lock_irqsave(&zone->lru_lock, flags);
+ VM_BUG_ON(!PageLRU(page));
+ __ClearPageLRU(page);
+ del_page_from_lru(zone, page);
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ }
+ free_hot_page(page);
+}
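/*
 * Illustrative sketch, not part of this patch: how the release path
 * above is normally reached.  page_cache_release() is a wrapper for
 * put_page(), which (for the non-compound case) drops the reference
 * and falls through to __page_cache_release() on the final put.  This
 * resembles put_page() elsewhere in this file.
 */
static void fastcall example_put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);	/* compound pages take a different path */
	else if (put_page_testzero(page))
		__page_cache_release(page);	/* last reference: pull off LRU and free */
}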
+
static void put_compound_page(struct page *page)
{
page = (struct page *)page_private(page);
}
EXPORT_SYMBOL(put_page);
+/**
+ * put_pages_list() - release a list of pages
+ * @pages: list of pages threaded on page->lru
+ *
+ * Release a list of pages which are strung together on page->lru.  Currently
+ * used by read_cache_pages() and related error recovery code.
+ */
+void put_pages_list(struct list_head *pages)
+{
+ while (!list_empty(pages)) {
+ struct page *victim;
+
+ victim = list_entry(pages->prev, struct page, lru);
+ list_del(&victim->lru);
+ page_cache_release(victim);
+ }
+}
+EXPORT_SYMBOL(put_pages_list);
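/*
 * Illustrative usage, not part of this patch: a caller that threads
 * pages on a local list via page->lru (as the read_cache_pages() error
 * path mentioned above does) can drop every reference in one call.
 * example_drop_pages() is a hypothetical helper.
 */
static void example_drop_pages(void)
{
	LIST_HEAD(pages);
	struct page *page;
	int i;

	for (i = 0; i < 4; i++) {
		page = alloc_page(GFP_KERNEL);	/* refcount starts at one */
		if (!page)
			break;
		list_add(&page->lru, &pages);
	}
	put_pages_list(&pages);	/* unthreads and releases each page */
}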
+
/*
* Writeback is about to end against a page which has been marked for immediate
* reclaim. If it still appears to be reclaimable, move it to the tail of the
zone = page_zone(page);
spin_lock_irqsave(&zone->lru_lock, flags);
if (PageLRU(page) && !PageActive(page)) {
- list_del(&page->lru);
- list_add_tail(&page->lru, &zone->inactive_list);
- inc_page_state(pgrotated);
+ list_move_tail(&page->lru, &zone->inactive_list);
+ __count_vm_event(PGROTATED);
}
if (!test_clear_page_writeback(page))
BUG();
del_page_from_inactive_list(zone, page);
SetPageActive(page);
add_page_to_active_list(zone, page);
- inc_page_state(pgactivate);
+ __count_vm_event(PGACTIVATE);
}
spin_unlock_irq(&zone->lru_lock);
}
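/*
 * Note on the new event counters (a sketch; assumes the vmstat
 * implementation of this era).  The double-underscore form is the
 * unprotected per-cpu increment, safe here only because both call
 * sites above already run under zone->lru_lock taken with
 * spin_lock_irq()/spin_lock_irqsave():
 *
 *	static inline void __count_vm_event(enum vm_event_item item)
 *	{
 *		__get_cpu_var(vm_event_states).event[item]++;
 *	}
 *
 * Callers without such protection would use count_vm_event() instead.
 */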
}
#endif
-/*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
- if (PageLRU(page)) {
- unsigned long flags;
- struct zone *zone = page_zone(page);
-
- spin_lock_irqsave(&zone->lru_lock, flags);
- BUG_ON(!PageLRU(page));
- __ClearPageLRU(page);
- del_page_from_lru(zone, page);
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- }
- free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
/*
* Batched page_cache_release(). Decrement the reference count on all the
* passed pages. If it fell to zero then remove the page from the LRU and
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
- BUG_ON(!PageLRU(page));
+ VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
del_page_from_lru(zone, page);
}
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- BUG_ON(PageLRU(page));
+ VM_BUG_ON(PageLRU(page));
if (put_page_testzero(page))
pagevec_add(&pages_to_free, page);
}
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
- BUG_ON(PageLRU(page));
+ VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
add_page_to_inactive_list(zone, page);
}
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
- BUG_ON(PageLRU(page));
+ VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- BUG_ON(PageActive(page));
+ VM_BUG_ON(PageActive(page));
SetPageActive(page);
add_page_to_active_list(zone, page);
}
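/*
 * Illustrative reconstruction, not part of this patch, of the batching
 * pattern the truncated hunks above share: pagevec walkers keep
 * zone->lru_lock held across consecutive pages in the same zone and
 * only cycle the lock when the zone changes.  example_lru_add() is a
 * hypothetical name.
 */
static void example_lru_add(struct pagevec *pvec)
{
	struct zone *zone = NULL;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
}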
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
- long count;
- long *pcount;
- int cpu = get_cpu();
-
- pcount = per_cpu_ptr(fbc->counters, cpu);
- count = *pcount + amount;
- if (count >= FBC_BATCH || count <= -FBC_BATCH) {
- spin_lock(&fbc->lock);
- fbc->count += count;
- *pcount = 0;
- spin_unlock(&fbc->lock);
- } else {
- *pcount = count;
- }
- put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
- long ret;
- int cpu;
-
- spin_lock(&fbc->lock);
- ret = fbc->count;
- for_each_possible_cpu(cpu) {
- long *pcount = per_cpu_ptr(fbc->counters, cpu);
- ret += *pcount;
- }
- spin_unlock(&fbc->lock);
- return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
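/*
 * Illustrative use of the counters deleted above, not part of this
 * patch; assumes a percpu_counter initialised elsewhere.  Updates stay
 * in the per-cpu slot until they reach +/-FBC_BATCH, so fbc->lock is
 * taken rarely; percpu_counter_sum() is the slow, exact read.
 * example_account() is a hypothetical helper.
 */
static void example_account(struct percpu_counter *fbc, long nr_pages)
{
	percpu_counter_mod(fbc, nr_pages);	/* cheap: usually lock-free */

	if (percpu_counter_sum(fbc) > 8 * FBC_BATCH)	/* folds all cpus */
		printk(KERN_INFO "counter crossed threshold\n");
}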
-
/*
* Perform any setup for the swap system
*/