#author("2025-09-11T20:14:20+09:00","default:guest","guest")
#author("2025-09-11T20:14:40+09:00","default:guest","guest")
*Referenced from [#w4116c8e]
#backlinks

*Description [#e45b90b4]
-Path: [[linux-5.15/mm/vmscan.c]]

-isolate_lru_pages() pulls up to nr_to_scan pages off the LRU list selected by lru in the given lruvec and moves them onto the dst list, so that the caller can work on the batch without holding the heavily contended lruvec->lru_lock.
--Pages belonging to a zone above sc->reclaim_idx are skipped and later spliced back to the head of the source list; pages that are already being freed or isolated elsewhere are put back as well.
--The caller must hold lruvec->lru_lock.


**Arguments [#ncde76d3]
-unsigned long nr_to_scan
--The number of eligible pages to look through on the list.
-struct lruvec *lruvec
--The LRU vector to pull pages from.
--[[linux-5.15/lruvec]]
-struct list_head *dst
--The temporary list to put the isolated pages on.
--[[linux-5.15/list_head]]
-unsigned long *nr_scanned
--Output parameter: set to the number of pages that were scanned (including pages skipped because of their zone).
-struct scan_control *sc
--The scan_control struct for this reclaim session.
--[[linux-5.15/scan_control]]
-enum lru_list lru
--LRU list id to isolate from (e.g. LRU_INACTIVE_ANON, LRU_INACTIVE_FILE).
--[[linux-5.15/lru_list]]


**Return value [#g28b4382]
-unsigned long
--The number of pages moved onto *dst (nr_taken); see the call-site sketch below.
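
For context, here is a minimal, abridged sketch of how a caller such as shrink_inactive_list() in mm/vmscan.c uses this function in 5.15: lru_lock is held only around the isolation step, and the batch is then processed without the lock. Counter updates, writeback throttling and the putback of unreclaimed pages are omitted; lruvec, sc, lru, nr_to_scan and pgdat come from the caller's context, so treat this as a sketch rather than the verbatim caller.

 	LIST_HEAD(page_list);
 	struct reclaim_stat stat;
 	unsigned long nr_scanned = 0;
 	unsigned long nr_taken, nr_reclaimed;
 
 	lru_add_drain();
 
 	spin_lock_irq(&lruvec->lru_lock);
 	/* Pull up to nr_to_scan pages off the LRU onto the private list. */
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
 				     &nr_scanned, sc, lru);
 	spin_unlock_irq(&lruvec->lru_lock);
 
 	if (nr_taken == 0)
 		return 0;
 
 	/* Work on the isolated batch without holding lru_lock. */
 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);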


**References [#cf2e02fa]


*Implementation [#t08d6a17]

 /*
  * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
  *
  * lruvec->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
  * and working on them outside the LRU lock.
  *
  * For pagecache intensive workloads, this function is the hottest
  * spot in the kernel (apart from copy_*_user functions).
  *
  * Lru_lock must be held before calling this function.
  *
  * @nr_to_scan:	The number of eligible pages to look through on the list.
  * @lruvec:	The LRU vector to pull pages from.
  * @dst:	The temp list to put pages on to.
  * @nr_scanned:	The number of pages that were scanned.
  * @sc:		The scan_control struct for this reclaim session
  * @lru:	LRU list id for isolating
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		struct lruvec *lruvec, struct list_head *dst,
 		unsigned long *nr_scanned, struct scan_control *sc,
 		enum lru_list lru)
 {
 	struct list_head *src = &lruvec->lists[lru];
 	unsigned long nr_taken = 0;
 	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
 	LIST_HEAD(pages_skipped);
 	isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
 
-
--[[linux-5.15/list_head]]
--[[linux-5.15/LIST_HEAD()]]
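
For reference, LIST_HEAD() declares a struct list_head and initializes it as an empty, self-referencing list in one step. Its definition in include/linux/list.h (as of 5.15) is:

 #define LIST_HEAD_INIT(name) { &(name), &(name) }
 
 #define LIST_HEAD(name) \
 	struct list_head name = LIST_HEAD_INIT(name)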

 	total_scan = 0;
 	scan = 0;
 	while (scan < nr_to_scan && !list_empty(src)) {
 		struct page *page;
 
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
 		nr_pages = compound_nr(page);
 		total_scan += nr_pages;
 
 		if (page_zonenum(page) > sc->reclaim_idx) {
 			list_move(&page->lru, &pages_skipped);
 			nr_skipped[page_zonenum(page)] += nr_pages;
 			continue;
 		}
 
-
--[[linux-5.15/list_empty()]]
--[[linux-5.15/lru_to_page()]]
--[[linux-5.15/prefetchw_prev_lru_page()]]
--[[linux-5.15/page_zonenum()]]
--[[linux-5.15/list_move()]]
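
lru_to_page() returns the page at the tail of the list, so the scan walks the LRU from its oldest end. It is a one-line macro (defined in include/linux/mm.h in this kernel generation):

 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

compound_nr() evaluates to the number of base pages covered by the (possibly compound) page, so a THP accounts for all of its tail pages in total_scan, as the comment below also notes.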

 		/*
 		 * Do not count skipped pages because that makes the function
 		 * return with no isolated pages if the LRU mostly contains
 		 * ineligible pages.  This causes the VM to not reclaim any
 		 * pages, triggering a premature OOM.
 		 *
 		 * Account all tail pages of THP.  This would not cause
 		 * premature OOM since __isolate_lru_page() returns -EBUSY
 		 * only when the page is being freed somewhere else.
 		 */
 		scan += nr_pages;
 		if (!__isolate_lru_page_prepare(page, mode)) {
 			/* It is being freed elsewhere */
 			list_move(&page->lru, src);
 			continue;
 		}
-
--[[linux-5.15/__isolate_lru_page_prepare()]]
--[[linux-5.15/list_move()]]
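
__isolate_lru_page_prepare() decides whether the page may be taken under the given isolate_mode_t; in this path mode is either 0 or ISOLATE_UNMAPPED, depending on sc->may_unmap. A heavily abridged sketch of its checks follows (the real function in mm/vmscan.c also handles ISOLATE_UNEVICTABLE and ISOLATE_ASYNC_MIGRATE, which matter for compaction/CMA rather than for reclaim):

 	/* Only take pages that are still on an LRU list. */
 	if (!PageLRU(page))
 		return false;
 
 	/* With ISOLATE_UNMAPPED, leave mapped pages alone. */
 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
 		return false;
 
 	return true;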

 		/*
 		 * Be careful not to clear PageLRU until after we're
 		 * sure the page is not being freed elsewhere -- the
 		 * page release code relies on it.
 		 */
 		if (unlikely(!get_page_unless_zero(page))) {
 			list_move(&page->lru, src);
 			continue;
 		}
 
-
--[[linux-5.15/unlikely()]]
--[[linux-5.15/get_page_unless_zero()]]
--[[linux-5.15/list_move()]]
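
get_page_unless_zero() takes a reference on the page only if its refcount is not already zero, i.e. only if the page is not in the middle of being freed. Its definition in include/linux/mm.h boils down to:

 static inline bool get_page_unless_zero(struct page *page)
 {
 	return page_ref_add_unless(page, 1, 0);
 }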

 		if (!TestClearPageLRU(page)) {
 			/* Another thread is already isolating this page */
 			put_page(page);
 			list_move(&page->lru, src);
 			continue;
 		}
 
-
--[[linux-5.15/TestClearPageLRU()]]
--[[linux-5.15/put_page()]]
--[[linux-5.15/list_move()]]
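
TestClearPageLRU() is one of the accessors generated by the page-flag macros (TESTCLEARFLAG() in include/linux/page-flags.h); it atomically tests and clears PG_lru and returns the old value, so only one of several racing threads can win the isolation. Conceptually it behaves like this simplified sketch (the generated version additionally resolves the flag on the correct struct page for compound pages):

 static inline int TestClearPageLRU(struct page *page)
 {
 	/* Atomically clear PG_lru and return whether it was set. */
 	return test_and_clear_bit(PG_lru, &page->flags);
 }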

 		nr_taken += nr_pages;
 		nr_zone_taken[page_zonenum(page)] += nr_pages;
 		list_move(&page->lru, dst);
 	}
 
-
--[[linux-5.15/list_move()]]

 	/*
 	 * Splice any skipped pages to the start of the LRU list. Note that
 	 * this disrupts the LRU order when reclaiming for lower zones but
 	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
 	 * scanning would soon rescan the same pages to skip and put the
 	 * system at risk of premature OOM.
 	 */
 	if (!list_empty(&pages_skipped)) {
 		int zid;
 
 		list_splice(&pages_skipped, src);
 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 			if (!nr_skipped[zid])
 				continue;
 
 			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
 			skipped += nr_skipped[zid];
 		}
 	}
 	*nr_scanned = total_scan;
 	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
 				    total_scan, skipped, nr_taken, mode, lru);
 	update_lru_sizes(lruvec, lru, nr_zone_taken);
 	return nr_taken;
 }

-
--[[linux-5.15/list_empty()]]
--[[linux-5.15/list_splice()]]
--[[linux-5.15/__count_zid_vm_events()]]
--[[linux-5.15/trace_mm_vmscan_lru_isolate()]]
--[[linux-5.15/update_lru_sizes()]]
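
update_lru_sizes() at the end subtracts the per-zone counts of isolated pages from the lruvec's LRU size accounting, so the LRU statistics stay consistent while the batch is off the list. Its shape in mm/vmscan.c is roughly:

 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 			enum lru_list lru, unsigned long *nr_zone_taken)
 {
 	int zid;
 
 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 		if (!nr_zone_taken[zid])
 			continue;
 
 		/* Negative delta: these pages have left the LRU. */
 		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 	}
 }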


*Comments [#r2c53b57]
