#author("2025-09-11T16:25:35+09:00","default:guest","guest") #author("2025-09-11T17:04:10+09:00","default:guest","guest") *参照元 [#vd9b0e58] #backlinks *説明 [#h80606b8] -パス: [[linux-5.15/mm/vmscan.c]] -FIXME: これは何? --説明 **引数 [#s7be8084] -pg_data_t *pgdat -- --[[linux-5.15/pg_data_t]] -int alloc_order -- -int reclaim_order -- -unsigned int highest_zoneidx -- --メモリを確保する最大のゾーンインデックス値。この値以下のゾーンがチェック対象です。 **返り値 [#h35fa71c] -なし **参考 [#q809dd11] *実装 [#v851d86e] static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, unsigned int highest_zoneidx) { long remaining = 0; DEFINE_WAIT(wait); if (freezing(current) || kthread_should_stop()) return; prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); - --[[linux-5.15/DEFINE_WAIT()]] --[[linux-5.15/freezing()]] --[[linux-5.15/kthread_should_stop()]] --[[linux-5.15/prepare_to_wait()]] /* * Try to sleep for a short interval. Note that kcompactd will only be * woken if it is possible to sleep for a short interval. This is * deliberate on the assumption that if reclaim cannot keep an * eligible zone balanced that it's also unlikely that compaction will * succeed. */ if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { - --[[linux-5.15/prepare_kswapd_sleep()]] /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. * When kswapd is going to sleep, it is reasonable to assume * that pages and compaction may succeed so reset the cache. */ reset_isolation_suitable(pgdat); - --[[linux-5.15/reset_isolation_suitable()]] /* * We have freed the memory, now we should compact it to make * allocation of the requested order possible. */ wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); - --[[linux-5.15/wakeup_kcompactd()]] remaining = schedule_timeout(HZ/10); - --[[linux-5.15/schedule_timeout()]] /* * If woken prematurely then reset kswapd_highest_zoneidx and * order. The values will either be from a wakeup request or * the previous request that slept prematurely. */ if (remaining) { WRITE_ONCE(pgdat->kswapd_highest_zoneidx, kswapd_highest_zoneidx(pgdat, highest_zoneidx)); if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) WRITE_ONCE(pgdat->kswapd_order, reclaim_order); } finish_wait(&pgdat->kswapd_wait, &wait); prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); - --[[linux-5.15/WRITE_ONCE()]] --[[linux-5.15/kswapd_highest_zoneidx()]] --[[linux-5.15/READ_ONCE()]] --[[linux-5.15/finish_wait()]] --[[linux-5.15/prepare_to_wait()]] } /* * After a short sleep, check if it was a premature sleep. If not, then * go fully to sleep until explicitly woken up. */ if (!remaining && prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { trace_mm_vmscan_kswapd_sleep(pgdat->node_id); - --[[linux-5.15/prepare_kswapd_sleep()]] --[[linux-5.15/trace_mm_vmscan_kswapd_sleep()]] /* * vmstat counters are not perfectly accurate and the estimated * value for counters such as NR_FREE_PAGES can deviate from the * true value by nr_online_cpus * threshold. To avoid the zone * watermarks being breached while under pressure, we reduce the * per-cpu vmstat threshold while kswapd is awake and restore * them before going back to sleep. */ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); if (!kthread_should_stop()) schedule(); set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); - --[[linux-5.15/set_pgdat_percpu_threshold()]] --[[linux-5.15/kthread_should_stop()]] } else { if (remaining) count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); else count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); } finish_wait(&pgdat->kswapd_wait, &wait); } - --[[linux-5.15/count_vm_event()]] --[[linux-5.15/finish_wait()]] *コメント [#y7113d60]