#author("2025-09-11T20:06:59+09:00","default:guest","guest")
#author("2025-09-11T20:07:46+09:00","default:guest","guest")
*Referrers [#t2c8d114]
#backlinks

*Description [#icb47ab4]
-Path: [[linux-5.15/mm/vmscan.c]]

-shrink_active_list(): moves pages from an active LRU list to the corresponding inactive list
--Pages found to be referenced, file-backed, and mapped executable (VM_EXEC) are rotated back onto the active list instead of being deactivated. How the function is reached is shown in the shrink_list() excerpt below.
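
For context, shrink_active_list() is called from shrink_list() in the same file, which sends active lists here and inactive lists to shrink_inactive_list(). The excerpt below is quoted from memory of linux-5.15/mm/vmscan.c, so verify it against the tree:

 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 				 struct lruvec *lruvec, struct scan_control *sc)
 {
 	if (is_active_lru(lru)) {
 		if (sc->may_deactivate & (1 << is_file_lru(lru)))
 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
 		else
 			sc->skipped_deactivate = 1;
 		return 0;
 	}
 
 	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
 }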


**Arguments [#wc0275d0]
-unsigned long nr_to_scan
--Number of pages to try to take off the active list.
-struct lruvec *lruvec
--The set of LRU lists (per node, per memcg) being reclaimed from.
--[[linux-5.15/lruvec]]
-struct scan_control *sc
--Parameters controlling this reclaim pass.
--[[linux-5.15/scan_control]]
-enum lru_list lru
--Which active list to shrink: LRU_ACTIVE_ANON or LRU_ACTIVE_FILE (see the sketch after this list).
--[[linux-5.15/lru_list]]
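
The lru argument here is always one of the two active lists. Below is a minimal, self-contained userspace model of the enum lru_list layout from linux-5.15 (include/linux/mmzone.h) and the is_file_lru() test; it is a sketch for illustration, not kernel code:

 #include <stdbool.h>
 #include <stdio.h>
 
 /* Model of enum lru_list as laid out in linux-5.15: the LRU_FILE and
  * LRU_ACTIVE offsets turn the anon/file and inactive/active
  * distinctions into simple arithmetic on the enum value. */
 #define LRU_BASE   0
 #define LRU_ACTIVE 1
 #define LRU_FILE   2
 
 enum lru_list {
 	LRU_INACTIVE_ANON = LRU_BASE,
 	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
 	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
 	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
 	LRU_UNEVICTABLE,
 	NR_LRU_LISTS
 };
 
 static bool is_file_lru(enum lru_list lru)
 {
 	return lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE;
 }
 
 int main(void)
 {
 	printf("LRU_ACTIVE_FILE=%d is_file_lru=%d\n",
 	       LRU_ACTIVE_FILE, is_file_lru(LRU_ACTIVE_FILE));
 	return 0;
 }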


**Return value [#m721ffdf]
-None


**References [#j42f1a41]


*Implementation [#bb73a88f]

 /*
  * shrink_active_list() moves pages from the active LRU to the inactive LRU.
  *
  * We move them the other way if the page is referenced by one or more
  * processes.
  *
  * If the pages are mostly unmapped, the processing is fast and it is
  * appropriate to hold lru_lock across the whole operation.  But if
  * the pages are mapped, the processing is slow (page_referenced()), so
  * we should drop lru_lock around each page.  It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
  * nobody will play with that bit on a non-LRU page.
  *
  * The downside is that we have to touch page->_refcount against each page.
  * But we had to alter page->flags anyway.
  */
 static void shrink_active_list(unsigned long nr_to_scan,
 			       struct lruvec *lruvec,
 			       struct scan_control *sc,
 			       enum lru_list lru)
 {
 	unsigned long nr_taken;
 	unsigned long nr_scanned;
 	unsigned long vm_flags;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
 	struct page *page;
 	unsigned nr_deactivate, nr_activate;
 	unsigned nr_rotated = 0;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
 	lru_add_drain();
 
 	spin_lock_irq(&lruvec->lru_lock);
 
-
--[[linux-5.15/LIST_HEAD()]]
--[[linux-5.15/page]]
--[[linux-5.15/is_file_lru()]]
--[[linux-5.15/pglist_data]]
--[[linux-5.15/lruvec_pgdat()]]
--[[linux-5.15/lru_add_drain()]]
--[[linux-5.15/spin_lock_irq()]]
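
The function starts by draining the per-CPU pagevecs (lru_add_drain()) so recently added pages are really on the LRU, then takes lru_lock. The three LIST_HEAD() locals are on-stack list heads: isolated pages are parked on l_hold so the lock can be dropped while they are examined, which is the "remove the pages from the LRU while processing them" strategy described in the header comment. A minimal userspace model of the LIST_HEAD() idiom (a sketch, not kernel code):

 #include <stdio.h>
 
 /* Model of the kernel's LIST_HEAD() idiom: a circular doubly linked
  * list whose head points at itself when empty, which is exactly the
  * condition list_empty() tests. */
 struct list_head {
 	struct list_head *next, *prev;
 };
 
 #define LIST_HEAD_INIT(name) { &(name), &(name) }
 #define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)
 
 static int list_empty(const struct list_head *head)
 {
 	return head->next == head;
 }
 
 int main(void)
 {
 	LIST_HEAD(l_hold);	/* same pattern as the three locals above */
 	printf("l_hold empty: %d\n", list_empty(&l_hold));
 	return 0;
 }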

 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
 				     &nr_scanned, sc, lru);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 
-
--[[linux-5.15/isolate_lru_pages()]]
--[[linux-5.15/__mod_node_page_state()]]
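
isolate_lru_pages() detaches up to nr_to_scan pages from the active list onto l_hold and reports how many it actually took. The isolated-page counter is then picked by the NR_ISOLATED_ANON + file trick: the anon and file counters are adjacent enum values, so adding the is_file_lru() result (0 or 1) selects the right one. A standalone model of that indexing (enum values simplified; in the kernel they sit inside the much larger enum node_stat_item):

 #include <stdio.h>
 
 enum node_stat_item { NR_ISOLATED_ANON, NR_ISOLATED_FILE, NR_STAT_ITEMS };
 
 int main(void)
 {
 	long stat[NR_STAT_ITEMS] = { 0 };
 	int file = 1;			/* is_file_lru(lru) for an active file list */
 	unsigned long nr_taken = 32;	/* as returned by isolate_lru_pages() */
 
 	/* like __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken) */
 	stat[NR_ISOLATED_ANON + file] += nr_taken;
 	printf("isolated anon=%ld file=%ld\n",
 	       stat[NR_ISOLATED_ANON], stat[NR_ISOLATED_FILE]);
 	return 0;
 }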

 	if (!cgroup_reclaim(sc))
 		__count_vm_events(PGREFILL, nr_scanned);
 	__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
 
 	spin_unlock_irq(&lruvec->lru_lock);
 
-
--[[linux-5.15/cgroup_reclaim()]]
--[[linux-5.15/__count_vm_events()]]
--[[linux-5.15/lruvec_memcg()]]
--[[linux-5.15/spin_unlock_irq()]]
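
The PGREFILL event (pages scanned off the active list) is counted in the global vmstat only for global reclaim, while the owning memcg's counter is updated in both cases, so per-memcg totals stay complete. lru_lock is then dropped before the per-page work begins. A simplified model of that accounting split (in the kernel, cgroup_reclaim(sc) tests sc->target_mem_cgroup, a pointer; a bool stands in for it here):

 #include <stdbool.h>
 #include <stdio.h>
 
 static unsigned long global_pgrefill, memcg_pgrefill;
 
 static void count_refill(bool cgroup_reclaim, unsigned long nr_scanned)
 {
 	if (!cgroup_reclaim)
 		global_pgrefill += nr_scanned;	/* __count_vm_events(PGREFILL, ...) */
 	memcg_pgrefill += nr_scanned;		/* __count_memcg_events(...) */
 }
 
 int main(void)
 {
 	count_refill(false, 32);	/* global reclaim: both counters move */
 	count_refill(true, 32);		/* memcg-targeted: only the memcg counter */
 	printf("global=%lu memcg=%lu\n", global_pgrefill, memcg_pgrefill);
 	return 0;
 }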

 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 
-
--[[linux-5.15/list_empty()]]
--[[linux-5.15/cond_resched()]]
--[[linux-5.15/lru_to_page()]]
--[[linux-5.15/list_del()]]
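
Inside the loop, lru_to_page() expands to list_entry((head)->prev, struct page, lru), so pages are popped from the tail of l_hold; cond_resched() keeps the loop preemption-friendly since no spinlock is held here. A standalone model of that pop-from-tail idiom, built on the same container_of() arithmetic (a sketch, not kernel code):

 #include <stddef.h>
 #include <stdio.h>
 
 struct list_head { struct list_head *next, *prev; };
 
 #define container_of(ptr, type, member) \
 	((type *)((char *)(ptr) - offsetof(type, member)))
 #define lru_to_page(head) container_of((head)->prev, struct page, lru)
 
 struct page {
 	int id;
 	struct list_head lru;
 };
 
 static void list_del(struct list_head *entry)
 {
 	entry->prev->next = entry->next;
 	entry->next->prev = entry->prev;
 }
 
 int main(void)
 {
 	struct page a = { .id = 1 }, b = { .id = 2 };
 	struct list_head l_hold = { &a.lru, &b.lru };	/* next = a, prev = b */
 
 	a.lru.next = &b.lru;  a.lru.prev = &l_hold;
 	b.lru.next = &l_hold; b.lru.prev = &a.lru;
 
 	while (l_hold.next != &l_hold) {	/* !list_empty(&l_hold) */
 		struct page *page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 		printf("popped page %d\n", page->id);	/* prints 2, then 1 */
 	}
 	return 0;
 }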

 		if (unlikely(!page_evictable(page))) {
 			putback_lru_page(page);
 			continue;
 		}
 
-
--[[linux-5.15/page_evictable()]]
--[[linux-5.15/putback_lru_page()]]

 		if (unlikely(buffer_heads_over_limit)) {
 			if (page_has_private(page) && trylock_page(page)) {
 				if (page_has_private(page))
 					try_to_release_page(page, 0);
 				unlock_page(page);
 			}
 		}
 
-
--[[linux-5.15/page_has_private()]]
--[[linux-5.15/trylock_page()]]
--[[linux-5.15/try_to_release_page()]]
--[[linux-5.15/unlock_page()]]
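
When buffer heads are over their limit, the loop opportunistically strips buffers from file pages. Note that page_has_private() is tested twice: once without the lock as a cheap filter, then again under trylock_page(), because the private data may have been released in between. A generic userspace sketch of that check/trylock/recheck pattern, with a pthread mutex standing in for the page lock and a bool for page_has_private():

 #include <pthread.h>
 #include <stdbool.h>
 #include <stdio.h>
 
 static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
 static bool has_private = true;
 
 static void maybe_release(void)
 {
 	/* cheap unlocked test first, like page_has_private() */
 	if (has_private && pthread_mutex_trylock(&page_lock) == 0) {
 		if (has_private) {		/* recheck under the lock */
 			has_private = false;	/* like try_to_release_page() */
 			puts("released private data");
 		}
 		pthread_mutex_unlock(&page_lock);
 	}
 }
 
 int main(void)
 {
 	maybe_release();
 	maybe_release();	/* second call: cheap test fails, no locking */
 	return 0;
 }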

 		if (page_referenced(page, 0, sc->target_mem_cgroup,
 				    &vm_flags)) {
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
 			 * that executable code get better chances to stay in
 			 * memory under moderate memory pressure.  Anon pages
 			 * are not likely to be evicted by use-once streaming
 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
 			 * so we ignore them here.
 			 */
 			if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
 				nr_rotated += thp_nr_pages(page);
 				list_add(&page->lru, &l_active);
 				continue;
 			}
 		}
 
-
--[[linux-5.15/page_referenced()]]
--[[linux-5.15/page_is_file_lru()]]
--[[linux-5.15/thp_nr_pages()]]
--[[linux-5.15/list_add()]]
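
page_referenced() returns the number of PTEs that referenced the page and fills vm_flags with the union of the mapping VMAs' flags. Per the comment, only referenced pages that are both file-backed and mapped executable are rotated back; nr_rotated is counted in base pages, which is why thp_nr_pages() is added rather than 1. A standalone model of the rotation test (VM_EXEC value as in include/linux/mm.h):

 #include <stdio.h>
 
 #define VM_EXEC 0x00000004UL
 
 int main(void)
 {
 	unsigned long vm_flags = VM_EXEC;	/* filled in by page_referenced() */
 	int file_backed = 1;			/* page_is_file_lru(page) */
 
 	if ((vm_flags & VM_EXEC) && file_backed)
 		puts("rotate: one more trip around the active list");
 	else
 		puts("deactivate");
 	return 0;
 }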

 		ClearPageActive(page);	/* we are de-activating */
 		SetPageWorkingset(page);
 		list_add(&page->lru, &l_inactive);
 	}
 
-
--[[linux-5.15/ClearPageActive()]]
--[[linux-5.15/SetPageWorkingset()]]
--[[linux-5.15/list_add()]]

 	/*
 	 * Move pages back to the lru list.
 	 */
 	spin_lock_irq(&lruvec->lru_lock);
 
 	nr_activate = move_pages_to_lru(lruvec, &l_active);
 	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
 	/* Keep all free pages in l_active list */
 	list_splice(&l_inactive, &l_active);
 
-
--[[linux-5.15/spin_lock_irq()]]
--[[linux-5.15/move_pages_to_lru()]]
--[[linux-5.15/list_splice()]]
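
move_pages_to_lru() puts pages back on their LRU lists and leaves behind, on the list it was passed, only the pages whose reference count dropped to zero; so after both calls l_active and l_inactive hold just pages to free, and list_splice() merges them so the single free pass below covers both. A standalone model of the splice (same pointer moves as the kernel's list_splice(); a sketch, not kernel code):

 #include <stdio.h>
 
 struct list_head { struct list_head *next, *prev; };
 #define LIST_HEAD(name) struct list_head name = { &(name), &(name) }
 
 /* Move every entry of @list to the front of @head. */
 static void list_splice(struct list_head *list, struct list_head *head)
 {
 	if (list->next == list)		/* list_empty() */
 		return;
 	list->next->prev = head;
 	list->prev->next = head->next;
 	head->next->prev = list->prev;
 	head->next = list->next;
 }
 
 int main(void)
 {
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
 	struct list_head a, b;
 
 	/* put one node on each list */
 	a.next = &l_active;   a.prev = &l_active;
 	l_active.next = &a;   l_active.prev = &a;
 	b.next = &l_inactive; b.prev = &l_inactive;
 	l_inactive.next = &b; l_inactive.prev = &b;
 
 	list_splice(&l_inactive, &l_active);
 	printf("l_active now starts at b: %d\n", l_active.next == &b);
 	return 0;
 }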

 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
 	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
 
-
--[[linux-5.15/__count_vm_events()]]
--[[linux-5.15/__count_memcg_events()]]
--[[linux-5.15/lruvec_memcg()]]

 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&lruvec->lru_lock);
 
-
--[[linux-5.15/__mod_node_page_state()]]
--[[linux-5.15/spin_unlock_irq()]]

 	mem_cgroup_uncharge_list(&l_active);
 	free_unref_page_list(&l_active);
 	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
 			nr_deactivate, nr_rotated, sc->priority, file);
 }

-
--[[linux-5.15/mem_cgroup_uncharge_list()]]
--[[linux-5.15/free_unref_page_list()]]
--[[linux-5.15/trace_mm_vmscan_lru_shrink_active()]]


*Comments [#rd6ee5f5]
