linux-5.15/get_page_from_freelist()
*Referrers [#z45dce67]
#backlinks
*Description [#aab37cfb]
-Path: [[linux-5.15/mm/page_alloc.c]]
-Walks the zonelist looking for a zone with enough free pages and tries to allocate a block of 2^order pages from it.
--This is the heart of the allocator fast path: for each candidate zone it applies the cpuset, dirty-limit, and watermark checks, then calls rmqueue() to take pages off the free lists. A call sketch follows the argument and return-value lists below.
**Arguments [#bc012eab]
-gfp_t gfp_mask
--GFP flags for this allocation.
--[[linux-5.15/gfp_t]]
-unsigned int order
--Allocation order: a block of 2^order contiguous pages is requested.
-int alloc_flags
--Internal ALLOC_* flags (ALLOC_WMARK_*, ALLOC_CPUSET, ALLOC_NOFRAGMENT, ...).
-const struct alloc_context *ac
--Allocation context: zonelist, preferred zoneref, highest usable zone index, nodemask and migratetype.
--[[linux-5.15/alloc_context]]
**Return value [#w1b0cf32]
-struct page *
--Pointer to the first struct page of the allocated block on success, NULL on failure.
--[[linux-5.15/page]]
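-A minimal sketch of how this function is reached, condensed from __alloc_pages() in [[linux-5.15/mm/page_alloc.c]]: flag preparation and the slowpath are collapsed into comments, so treat it as an illustration of the call flow rather than the exact source.
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask)
{
	struct page *page;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;	/* fast path tests the low watermark */
	gfp_t alloc_gfp;
	struct alloc_context ac = { };

	/* Fills ac (zonelist, preferred_zoneref, highest_zoneidx, nodemask,
	 * migratetype) and may adjust the gfp mask and alloc_flags. */
	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
			&alloc_gfp, &alloc_flags))
		return NULL;

	/* First allocation attempt: one pass over the zonelist. */
	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
	if (likely(page))
		return page;

	/* The real code falls back to __alloc_pages_slowpath() here, which
	 * may wake kswapd, reclaim, compact and then retry. */
	return __alloc_pages_slowpath(alloc_gfp, order, &ac);
}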
**References [#f1a6c534]
*Implementation [#b3f149fb]
/*
* get_page_from_freelist goes through the zonelist trying to allocate
* a page.
*/
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
const struct alloc_context *ac)
{
struct zoneref *z;
struct zone *zone;
struct pglist_data *last_pgdat_dirty_limit = NULL;
bool no_fallback;
-
--[[linux-5.15/zoneref]]
--[[linux-5.15/zone]]
--[[linux-5.15/pglist_data]]
retry:
/*
* Scan zonelist, looking for a zone with enough free.
* See also __cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
*/
no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
z = ac->preferred_zoneref;
for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
ac->nodemask) {
struct page *page;
unsigned long mark;
if (cpusets_enabled() &&
(alloc_flags & ALLOC_CPUSET) &&
!__cpuset_zone_allowed(zone, gfp_mask))
continue;
-
--[[linux-5.15/for_next_zone_zonelist_nodemask()]]
--[[linux-5.15/page]]
--[[linux-5.15/cpusets_enabled()]]
--[[linux-5.15/__cpuset_zone_allowed()]]
/*
* When allocating a page cache page for writing, we
* want to get it from a node that is within its dirty
* limit, such that no single node holds more than its
* proportional share of globally allowed dirty pages.
* The dirty limits take into account the node's
* lowmem reserves and high watermark so that kswapd
* should be able to balance it without having to
* write pages from its LRU list.
*
* XXX: For now, allow allocations to potentially
* exceed the per-node dirty limit in the slowpath
* (spread_dirty_pages unset) before going into reclaim,
* which is important when on a NUMA setup the allowed
* nodes are together not big enough to reach the
* global limit. The proper fix for these situations
* will require awareness of nodes in the
* dirty-throttling and the flusher threads.
*/
if (ac->spread_dirty_pages) {
if (last_pgdat_dirty_limit == zone->zone_pgdat)
continue;
if (!node_dirty_ok(zone->zone_pgdat)) {
last_pgdat_dirty_limit = zone->zone_pgdat;
continue;
}
}
-
--[[linux-5.15/node_dirty_ok()]]
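-For reference, node_dirty_ok() lives in [[linux-5.15/mm/page-writeback.c]] and is essentially the check below (reproduced from memory, so treat details as approximate): a node passes as long as its dirty plus writeback pages stay within its proportional share of the global dirty limit.
bool node_dirty_ok(struct pglist_data *pgdat)
{
	unsigned long limit = node_dirty_limit(pgdat);	/* this node's share */
	unsigned long nr_pages = 0;

	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
	nr_pages += node_page_state(pgdat, NR_WRITEBACK);

	return nr_pages <= limit;
}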
if (no_fallback && nr_online_nodes > 1 &&
zone != ac->preferred_zoneref->zone) {
int local_nid;
/*
* If moving to a remote node, retry but allow
* fragmenting fallbacks. Locality is more important
* than fragmentation avoidance.
*/
local_nid = zone_to_nid(ac->preferred_zoneref->zone);
if (zone_to_nid(zone) != local_nid) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
}
}
-
--[[linux-5.15/zone_to_nid()]]
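-zone_to_nid() is a trivial accessor defined in [[linux-5.15/include/linux/mmzone.h]], roughly as below, so the comparison above simply asks "does this zone live on the preferred node?".
#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;	/* only one node without NUMA */
}
#endif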
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
if (!zone_watermark_fast(zone, order, mark,
ac->highest_zoneidx, alloc_flags,
gfp_mask)) {
int ret;
-
--[[linux-5.15/wmark_pages()]]
--[[linux-5.15/zone_watermark_fast()]]
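-wmark_pages() just indexes the zone's watermark array and adds the temporary boost; ALLOC_WMARK_MASK selects which watermark (min/low/high) this attempt must clear. From [[linux-5.15/include/linux/mmzone.h]], quoted from memory:
/* watermark_boost is raised temporarily after fragmentation-causing
 * fallback allocations and decays back to 0 via kswapd. */
#define wmark_pages(z, i)	(z->_watermark[i] + z->watermark_boost)
#define min_wmark_pages(z)	(z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z)	(z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z)	(z->_watermark[WMARK_HIGH] + z->watermark_boost)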
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
* Watermark failed for this zone, but see if we can
* grow this zone if it contains deferred pages.
*/
if (static_branch_unlikely(&deferred_pages)) {
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
#endif
-
--[[linux-5.15/static_branch_unlikely()]]
--[[linux-5.15/_deferred_grow_zone()]]
/* Checked here to keep the fast path fast */
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (alloc_flags & ALLOC_NO_WATERMARKS)
goto try_this_zone;
if (!node_reclaim_enabled() ||
!zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
continue;
-
--[[linux-5.15/BUILD_BUG_ON()]]
--[[linux-5.15/node_reclaim_enabled()]]
--[[linux-5.15/zone_allows_reclaim()]]
ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
switch (ret) {
case NODE_RECLAIM_NOSCAN:
/* did not scan */
continue;
case NODE_RECLAIM_FULL:
/* scanned but unreclaimable */
continue;
default:
/* did we reclaim enough */
if (zone_watermark_ok(zone, order, mark,
ac->highest_zoneidx, alloc_flags))
goto try_this_zone;
continue;
}
}
-
--[[linux-5.15/node_reclaim()]]
--[[linux-5.15/zone_watermark_ok()]]
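-The switch above distinguishes node_reclaim()'s return codes, defined in [[linux-5.15/mm/internal.h]] (values quoted from memory):
#define NODE_RECLAIM_NOSCAN	-2	/* node was not scanned at all */
#define NODE_RECLAIM_FULL	-1	/* scanned but nothing was reclaimable */
#define NODE_RECLAIM_SOME	0	/* reclaimed some pages, possibly not enough */
#define NODE_RECLAIM_SUCCESS	1	/* reclaimed enough to retry the watermark */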
try_this_zone:
page = rmqueue(ac->preferred_zoneref->zone, zone, order,
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
/*
* If this is a high-order atomic allocation then check
* if the pageblock should be reserved for the future
*/
if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
reserve_highatomic_pageblock(page, zone, order);
return page;
-
--[[linux-5.15/rmqueue()]]
--[[linux-5.15/prep_new_page()]]
--[[linux-5.15/unlikely()]]
--[[linux-5.15/reserve_highatomic_pageblock()]]
} else {
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/* Try again if zone has deferred pages */
if (static_branch_unlikely(&deferred_pages)) {
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
#endif
}
}
-
--[[linux-5.15/static_branch_unlikely()]]
--[[linux-5.15/_deferred_grow_zone()]]
/*
* It's possible on a UMA machine to get through all zones that are
* fragmented. If avoiding fragmentation, reset and try again.
*/
if (no_fallback) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
}
return NULL;
}
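-The reserve_highatomic_pageblock() call in the success path fires for high-order atomic allocations (order != 0 with ALLOC_HARDER set, which the slowpath derives from __GFP_ATOMIC). A hypothetical caller that could end up on that path is a driver doing a multi-page allocation from a context that cannot sleep:
/* Hypothetical example: an order-2 atomic allocation such as an
 * interrupt handler might issue. If the fast path fails, the slowpath
 * sets ALLOC_HARDER, and a successful allocation may then reserve the
 * pageblock for future high-order atomic requests. */
struct page *p = alloc_pages(GFP_ATOMIC, 2);

if (p) {
	/* ... use the four contiguous pages ... */
	__free_pages(p, 2);
}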
*Comments [#ibf469ab]