}
/**
- * lru_cache_add: add a page to the page lists
+ * lru_cache_add_anon - add a page to the page lists
* @page: the page to add
*/
void lru_cache_add_anon(struct page *page)
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-void lru_add_drain_all_cpuslocked(void)
+/*
+ * Doesn't need any cpu hotplug locking because we do rely on per-cpu
+ * kworkers being shut down before our page_alloc_cpu_dead callback is
+ * executed on the offlined cpu.
+ * Calling this function with cpu hotplug locks held can actually lead
+ * to obscure indirect dependencies via WQ context.
+ */
+void lru_add_drain_all(void)
{
static DEFINE_MUTEX(lock);
static struct cpumask has_work;
mutex_unlock(&lock);
}
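/*
 * Illustrative sketch, not part of this patch: the queue-and-flush pattern
 * that the (elided) body of lru_add_drain_all() follows.  It assumes the
 * per-cpu lru_add_pvec pagevec, the lru_add_drain_per_cpu() work handler
 * and the mm_percpu_wq workqueue defined elsewhere in mm/swap.c; the set of
 * pagevecs checked below is simplified, so treat this as an approximation.
 */
static void lru_drain_all_sketch(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		/* Queue a drain only on CPUs that have batched pages. */
		if (pagevec_count(&per_cpu(lru_add_pvec, cpu))) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	/* Wait for every queued drain to complete before returning. */
	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	mutex_unlock(&lock);
}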
-void lru_add_drain_all(void)
-{
- get_online_cpus();
- lru_add_drain_all_cpuslocked();
- put_online_cpus();
-}
-
/**
* release_pages - batched put_page()
* @pages: array of pages to release
* @pvec: Where the resulting entries are placed
* @mapping: The address_space to search
* @start: The starting entry index
* @nr_entries: The maximum number of entries
* @indices: The cache indices corresponding to the entries in @pvec
*
* pagevec_lookup_entries() will search for and return a group of up
* to @nr_entries pages and shadow entries in the mapping. All
* entries are placed in @pvec. pagevec_lookup_entries() takes a
* reference against actual pages in @pvec.
*
*/
unsigned pagevec_lookup_entries(struct pagevec *pvec,
struct address_space *mapping,
- pgoff_t start, unsigned nr_pages,
+ pgoff_t start, unsigned nr_entries,
pgoff_t *indices)
{
- pvec->nr = find_get_entries(mapping, start, nr_pages,
+ pvec->nr = find_get_entries(mapping, start, nr_entries,
pvec->pages, indices);
return pagevec_count(pvec);
}
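/*
 * Illustrative usage sketch, not part of this patch: walking a mapping with
 * pagevec_lookup_entries().  Shadow entries come back as exceptional (value)
 * entries with no page reference held, so they must be filtered out before
 * the entry is used as a struct page.  radix_tree_exceptional_entry(),
 * pagevec_remove_exceptionals() and pagevec_release() are assumed from the
 * kernels of this era (newer trees spell the first check xa_is_value()), and
 * pagevec_init() is assumed to take a single argument.
 */
static void scan_mapping_sketch(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned i;

	pagevec_init(&pvec);
	while (pagevec_lookup_entries(&pvec, mapping, index,
				      PAGEVEC_SIZE, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			/* Shadow entry: no reference was taken, skip it. */
			if (radix_tree_exceptional_entry(page))
				continue;

			/* ... operate on the referenced page here ... */
		}
		/* Drop exceptional entries, then the page references. */
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
}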
* @mapping: The address_space to search
* @start: The starting page index
* @end: The final page index
- * @nr_pages: The maximum number of pages
*
- * pagevec_lookup_range() will search for and return a group of up to @nr_pages
+ * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
* pages in the mapping starting from index @start and up to index @end
* (inclusive). The pages are placed in @pvec. pagevec_lookup_range() takes a
* reference against the pages in @pvec.
* also update @start to index the next page for the traversal.
*
* pagevec_lookup_range() returns the number of pages which were found. If this
- * number is smaller than @nr_pages, the end of specified range has been
+ * number is smaller than PAGEVEC_SIZE, the end of specified range has been
* reached.
*/
unsigned pagevec_lookup_range(struct pagevec *pvec,