*Referrers [#j5150125]
#backlinks
*Description [#e531e8c6]
-Path: [[linux-5.15/mm/vmscan.c]]
-Helper for shrink_node(): isolates pages from the given inactive LRU list, tries to reclaim them via shrink_page_list(), and puts the survivors back on the LRU.
**Arguments [#bf840f83]
-unsigned long nr_to_scan
--Maximum number of pages to isolate from the list.
-struct lruvec *lruvec
--LRU vector (per-node, per-memcg set of LRU lists) to reclaim from.
--[[linux-5.15/lruvec]]
-struct scan_control *sc
--Parameters controlling this reclaim scan.
--[[linux-5.15/scan_control]]
-enum lru_list lru
--Which LRU list to shrink; here one of the inactive lists.
--[[linux-5.15/lru_list]]
**Return value [#zfccce4b]
-unsigned long
--Number of reclaimed pages.
**See also [#b08f9718]
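-In linux-5.15 the caller is shrink_list() in the same file, which sends active lists to shrink_active_list() and inactive lists here. A sketch of the caller, quoted from memory of the 5.15 sources (details may differ slightly from the tree):
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                                 struct lruvec *lruvec, struct scan_control *sc)
{
        if (is_active_lru(lru)) {
                /* Active lists are only aged, never reclaimed directly. */
                if (sc->may_deactivate & (1 << is_file_lru(lru)))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                else
                        sc->skipped_deactivate = 1;
                return 0;
        }

        return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}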
*Implementation [#neeac27a]
/*
 * shrink_inactive_list() is a helper for shrink_node().  It returns the number
 * of reclaimed pages
 */
static unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                     struct scan_control *sc, enum lru_list lru)
{
        LIST_HEAD(page_list);
        unsigned long nr_scanned;
        unsigned int nr_reclaimed = 0;
        unsigned long nr_taken;
        struct reclaim_stat stat;
        bool file = is_file_lru(lru);
        enum vm_event_item item;
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
        bool stalled = false;
-
--[[linux-5.15/LIST_HEAD()]]
--[[linux-5.15/reclaim_stat]]
--[[linux-5.15/is_file_lru()]]
--[[linux-5.15/vm_event_item]]
--[[linux-5.15/pglist_data]]
--[[linux-5.15/lruvec_pgdat()]]
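-Note for the statistics code below: file is a bool, and both the node counters and the vm events are laid out in adjacent anon/file pairs, so NR_ISOLATED_ANON + file and PGSCAN_ANON + file select the right entry without a branch. A minimal illustration with invented _S names (the real enums are node_stat_item and vm_event_item):
/* Hypothetical sketch: adjacent anon/file enum values let a bool select one. */
enum node_stat_sketch {
        NR_ISOLATED_ANON_S,     /* anon variant */
        NR_ISOLATED_FILE_S,     /* == NR_ISOLATED_ANON_S + 1 */
};

static inline int isolated_idx(bool file)
{
        return NR_ISOLATED_ANON_S + file;       /* false -> anon, true -> file */
}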
        while (unlikely(too_many_isolated(pgdat, file, sc))) {
                if (stalled)
                        return 0;

                /* wait a bit for the reclaimer. */
                msleep(100);
                stalled = true;

                /* We are about to die and free our memory. Return now. */
                if (fatal_signal_pending(current))
                        return SWAP_CLUSTER_MAX;
        }
-
--[[linux-5.15/unlikely()]]
--[[linux-5.15/too_many_isolated()]]
--[[linux-5.15/msleep()]]
--[[linux-5.15/fatal_signal_pending()]]
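-The while loop above throttles direct reclaimers when too many pages are already isolated from this node's LRUs; kswapd itself is never stalled here. The gist of too_many_isolated() in 5.15, quoted from memory (see mm/vmscan.c for the exact code):
static int too_many_isolated(struct pglist_data *pgdat, int file,
                             struct scan_control *sc)
{
        unsigned long inactive, isolated;

        /* kswapd is never throttled here. */
        if (current_is_kswapd())
                return 0;
        if (!writeback_throttling_sane(sc))
                return 0;

        if (file) {
                inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
                isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
        } else {
                inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
                isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
        }

        /* GFP_NOIO/GFP_NOFS callers may isolate more before blocking. */
        if (!(sc->gfp_mask & (__GFP_IO | __GFP_FS)))
                inactive >>= 3;

        return isolated > inactive;
}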
        lru_add_drain();
-
--[[linux-5.15/lru_add_drain()]]
        spin_lock_irq(&lruvec->lru_lock);

        nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
                                     &nr_scanned, sc, lru);
-
--[[linux-5.15/spin_lock_irq()]]
--[[linux-5.15/isolate_lru_pages()]]
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
        item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_scanned);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
        __count_vm_events(PGSCAN_ANON + file, nr_scanned);

        spin_unlock_irq(&lruvec->lru_lock);
-
--[[linux-5.15/__mod_node_page_state()]]
--[[linux-5.15/current_is_kswapd()]]
--[[linux-5.15/cgroup_reclaim()]]
--[[linux-5.15/lruvec_memcg()]]
--[[linux-5.15/__count_vm_events()]]
--[[linux-5.15/__count_memcg_events()]]
--[[linux-5.15/spin_unlock_irq()]]
        if (nr_taken == 0)
                return 0;

        nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
-
--[[linux-5.15/shrink_page_list()]]
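-shrink_page_list() performs the per-page reclaim work (unmapping, writeback, freeing) and fills stat with counts of what it encountered. Roughly, from memory of 5.15 (prototype in mm/vmscan.c, struct reclaim_stat in include/linux/vmstat.h):
static unsigned int shrink_page_list(struct list_head *page_list,
                                     struct pglist_data *pgdat,
                                     struct scan_control *sc,
                                     struct reclaim_stat *stat,
                                     bool ignore_references);

struct reclaim_stat {
        unsigned nr_dirty;
        unsigned nr_unqueued_dirty;
        unsigned nr_congested;
        unsigned nr_writeback;
        unsigned nr_immediate;
        unsigned nr_pageout;
        unsigned nr_activate[2];        /* ANON_AND_FILE */
        unsigned nr_ref_keep;
        unsigned nr_unmap_fail;
        unsigned nr_lazyfree_fail;
};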
        spin_lock_irq(&lruvec->lru_lock);
        move_pages_to_lru(lruvec, &page_list);
-
--[[linux-5.15/spin_lock_irq()]]
--[[linux-5.15/move_pages_to_lru()]]
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
        item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_reclaimed);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
        __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
        spin_unlock_irq(&lruvec->lru_lock);
-
--[[linux-5.15/__mod_node_page_state()]]
--[[linux-5.15/current_is_kswapd()]]
--[[linux-5.15/cgroup_reclaim()]]
--[[linux-5.15/__count_vm_events()]]
--[[linux-5.15/__count_memcg_events()]]
--[[linux-5.15/lruvec_memcg()]]
--[[linux-5.15/spin_unlock_irq()]]
        lru_note_cost(lruvec, file, stat.nr_pageout);
        mem_cgroup_uncharge_list(&page_list);
        free_unref_page_list(&page_list);
-
--[[linux-5.15/lru_note_cost()]]
--[[linux-5.15/mem_cgroup_uncharge_list()]]
--[[linux-5.15/free_unref_page_list()]]
        /*
         * If dirty pages are scanned that are not queued for IO, it
         * implies that flushers are not doing their job. This can
         * happen when memory pressure pushes dirty pages to the end of
         * the LRU before the dirty limits are breached and the dirty
         * data has expired. It can also happen when the proportion of
         * dirty pages grows not through writes but through memory
         * pressure reclaiming all the clean cache. And in some cases,
         * the flushers simply cannot keep up with the allocation
         * rate. Nudge the flusher threads in case they are asleep.
         */
        if (stat.nr_unqueued_dirty == nr_taken)
                wakeup_flusher_threads(WB_REASON_VMSCAN);
-
--[[linux-5.15/wakeup_flusher_threads()]]
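-The sc->nr counters accumulated below live in an anonymous struct inside struct scan_control; shrink_node() reads them afterwards to detect writeback congestion and decide whether to stall reclaim. From memory of the 5.15 definition (see [[linux-5.15/scan_control]]):
struct scan_control {
        /* ... other fields ... */
        struct {
                unsigned int dirty;
                unsigned int unqueued_dirty;
                unsigned int congested;
                unsigned int writeback;
                unsigned int immediate;
                unsigned int file_taken;
                unsigned int taken;
        } nr;
        /* ... other fields ... */
};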
        sc->nr.dirty += stat.nr_dirty;
        sc->nr.congested += stat.nr_congested;
        sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
        sc->nr.writeback += stat.nr_writeback;
        sc->nr.immediate += stat.nr_immediate;
        sc->nr.taken += nr_taken;
        if (file)
                sc->nr.file_taken += nr_taken;

        trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
                        nr_scanned, nr_reclaimed, &stat, sc->priority, file);
        return nr_reclaimed;
}
-
--[[linux-5.15/trace_mm_vmscan_lru_shrink_inactive()]]
*Comments [#ya3f37e6]