#author("2025-09-11T13:32:19+09:00","default:guest","guest") #author("2025-09-11T13:47:06+09:00","default:guest","guest") *参照元 [#x082c713] #backlinks *説明 [#we98f8de] -パス: [[linux-5.15/mm/vmscan.c]] -kswapdのスレッドメイン関数 **引数 [#b835ab99] -void *p --メモリノード。pg_data_t型のポインタを渡す。 --[[linux-5.15/pg_data_t]] **返り値 [#vacd6ec1] -int --常に0、意味はない --常に0、基本的に終了しないので意味はない。 **参考 [#a9528d89] *実装 [#e3b7008b] /* * The background pageout daemon, started as a kernel thread * from the init process. * * This basically trickles out pages so that we have _some_ * free memory available even if there is no other activity * that frees anything up. This is needed for things like routing * etc, where we otherwise might have all activity going on in * asynchronous contexts that cannot page things out. * * If there are applications that are active memory-allocators * (most normal use), this basically shouldn't matter. */ static int kswapd(void *p) { unsigned int alloc_order, reclaim_order; unsigned int highest_zoneidx = MAX_NR_ZONES - 1; pg_data_t *pgdat = (pg_data_t *)p; struct task_struct *tsk = current; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); - --[[linux-5.15/pg_data_t]] --[[linux-5.15/task_struct]] --[[linux-5.15/cpumask]] --[[linux-5.15/cpumask_of_node()]] --[[linux-5.15/cpumask_empty()]] --[[linux-5.15/set_cpus_allowed_ptr()]] /* * Tell the memory management that we're a "memory allocator", * and that if we need more memory we should get access to it * regardless (see "__alloc_pages()"). "kswapd" should * never get caught in the normal page freeing logic. * * (Kswapd normally doesn't need memory anyway, but sometimes * you need a small amount of memory in order to be able to * page out something else, and this flag essentially protects * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). */ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable(); WRITE_ONCE(pgdat->kswapd_order, 0); WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); - --[[linux-5.15/set_freezable()]] --[[linux-5.15/WRITE_ONCE()]] for ( ; ; ) { bool ret; alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); highest_zoneidx = kswapd_highest_zoneidx(pgdat, highest_zoneidx); - --[[linux-5.15/READ_ONCE()]] --[[linux-5.15/kswapd_highest_zoneidx()]] kswapd_try_sleep: kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, highest_zoneidx); - --[[linux-5.15/kswapd_try_to_sleep()]] /* Read the new order and highest_zoneidx */ alloc_order = READ_ONCE(pgdat->kswapd_order); highest_zoneidx = kswapd_highest_zoneidx(pgdat, highest_zoneidx); WRITE_ONCE(pgdat->kswapd_order, 0); WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); ret = try_to_freeze(); if (kthread_should_stop()) break; - --[[linux-5.15/READ_ONCE()]] --[[linux-5.15/kswapd_highest_zoneidx()]] --[[linux-5.15/WRITE_ONCE()]] --[[linux-5.15/try_to_freeze()]] --[[linux-5.15/kthread_should_stop()]] /* * We can speed up thawing tasks if we don't call balance_pgdat * after returning from the refrigerator */ if (ret) continue; /* * Reclaim begins at the requested order but if a high-order * reclaim fails then kswapd falls back to reclaiming for * order-0. If that happens, kswapd will consider sleeping * for the order it finished reclaiming at (reclaim_order) * but kcompactd is woken to compact for the original * request (alloc_order). 
*Implementation [#e3b7008b]
 /*
  * The background pageout daemon, started as a kernel thread
  * from the init process.
  *
  * This basically trickles out pages so that we have _some_
  * free memory available even if there is no other activity
  * that frees anything up. This is needed for things like routing
  * etc, where we otherwise might have all activity going on in
  * asynchronous contexts that cannot page things out.
  *
  * If there are applications that are active memory-allocators
  * (most normal use), this basically shouldn't matter.
  */
 static int kswapd(void *p)
 {
 	unsigned int alloc_order, reclaim_order;
 	unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
 	pg_data_t *pgdat = (pg_data_t *)p;
 	struct task_struct *tsk = current;
 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
-
--[[linux-5.15/pg_data_t]]
--[[linux-5.15/task_struct]]
--[[linux-5.15/cpumask]]
--[[linux-5.15/cpumask_of_node()]]
--[[linux-5.15/cpumask_empty()]]
--[[linux-5.15/set_cpus_allowed_ptr()]]

 	/*
 	 * Tell the memory management that we're a "memory allocator",
 	 * and that if we need more memory we should get access to it
 	 * regardless (see "__alloc_pages()"). "kswapd" should
 	 * never get caught in the normal page freeing logic.
 	 *
 	 * (Kswapd normally doesn't need memory anyway, but sometimes
 	 * you need a small amount of memory in order to be able to
 	 * page out something else, and this flag essentially protects
 	 * us from recursively trying to free more memory as we're
 	 * trying to free the first piece of memory in the first place).
 	 */
 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
 	set_freezable();
 
 	WRITE_ONCE(pgdat->kswapd_order, 0);
 	WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
-
--[[linux-5.15/set_freezable()]]
--[[linux-5.15/WRITE_ONCE()]]

 	for ( ; ; ) {
 		bool ret;
 
 		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
 							highest_zoneidx);
-
--[[linux-5.15/READ_ONCE()]]
--[[linux-5.15/kswapd_highest_zoneidx()]]

 kswapd_try_sleep:
 		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
 					highest_zoneidx);
-
--[[linux-5.15/kswapd_try_to_sleep()]]

 		/* Read the new order and highest_zoneidx */
 		alloc_order = READ_ONCE(pgdat->kswapd_order);
 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
 							highest_zoneidx);
 		WRITE_ONCE(pgdat->kswapd_order, 0);
 		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
 
 		ret = try_to_freeze();
 		if (kthread_should_stop())
 			break;
-
--[[linux-5.15/READ_ONCE()]]
--[[linux-5.15/kswapd_highest_zoneidx()]]
--[[linux-5.15/WRITE_ONCE()]]
--[[linux-5.15/try_to_freeze()]]
--[[linux-5.15/kthread_should_stop()]]

 		/*
 		 * We can speed up thawing tasks if we don't call balance_pgdat
 		 * after returning from the refrigerator
 		 */
 		if (ret)
 			continue;
 
 		/*
 		 * Reclaim begins at the requested order but if a high-order
 		 * reclaim fails then kswapd falls back to reclaiming for
 		 * order-0. If that happens, kswapd will consider sleeping
 		 * for the order it finished reclaiming at (reclaim_order)
 		 * but kcompactd is woken to compact for the original
 		 * request (alloc_order).
 		 */
 		trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
 						alloc_order);
 		reclaim_order = balance_pgdat(pgdat, alloc_order,
 						highest_zoneidx);
 		if (reclaim_order < alloc_order)
 			goto kswapd_try_sleep;
 	}
-
--[[linux-5.15/trace_mm_vmscan_kswapd_wake()]]
--[[linux-5.15/balance_pgdat()]]

 	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
 
 	return 0;
 }

*Comments [#p37b6e43]
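-Supplementary note: the kswapd_order and kswapd_highest_zoneidx fields that the loop above reads with READ_ONCE() are filled in by the allocator side before kswapd is woken. Below is a minimal sketch of that waker side, loosely based on wakeup_kswapd() in [[linux-5.15/mm/vmscan.c]]; the real function performs additional checks (e.g. watermark tests and tracing) that are omitted here, so treat this only as an illustration of the handshake, not as the exact code.
 /* Minimal sketch of the waker side (simplified from wakeup_kswapd()) */
 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
 		   enum zone_type highest_zoneidx)
 {
 	pg_data_t *pgdat = zone->zone_pgdat;
 	enum zone_type curr_idx;
 
 	/* remember the largest request seen since kswapd last cleared these */
 	curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
 	if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
 		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
 	if (READ_ONCE(pgdat->kswapd_order) < order)
 		WRITE_ONCE(pgdat->kswapd_order, order);
 
 	/* kswapd sleeps on kswapd_wait inside kswapd_try_to_sleep() */
 	if (waitqueue_active(&pgdat->kswapd_wait))
 		wake_up_interruptible(&pgdat->kswapd_wait);
 }
--[[linux-5.15/wakeup_kswapd()]]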