linux-5.15/rmqueue()
をテンプレートにして作成
[
トップ
] [
新規
|
一覧
|
検索
|
最終更新
|
ヘルプ
|
ログイン
]
開始行:
*参照元 [#y99b61f7]
#backlinks
*説明 [#cd36dd34]
-パス: [[linux-5.15/mm/page_alloc.c]]
-指定されたゾーンからページを1つ確保する関数。
--可能ならper-CPUページリスト(pcplist)から確保し、それ以外はゾーンのロックを取ってフリーリストから確保する。
**引数 [#u34e2f23]
-struct zone *preferred_zone
--
--[[linux-5.15/zone]]
-struct zone *zone
--
--[[linux-5.15/zone]]
-unsigned int order
--
-gfp_t gfp_flags
--
--[[linux-5.15/gfp_t]]
-unsigned int alloc_flags
--
-int migratetype
--
**返り値 [#oecaaeca]
-struct page *
--
--[[linux-5.15/page]]
**参考 [#o5c9f81d]
*実装 [#c2edd239]
/*
* Allocate a page from the given zone. Use pcplists for...
*/
static inline
struct page *rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
gfp_t gfp_flags, unsigned int alloc_flags,
int migratetype)
{
unsigned long flags;
struct page *page;
if (likely(pcp_allowed_order(order))) {
/*
* MIGRATE_MOVABLE pcplist could have the pages on CMA...
* we need to skip it when CMA area isn't allowed.
*/
if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA...
migratetype != MIGRATE_MOVABLE) {
page = rmqueue_pcplist(preferred_zone, zone, order,
gfp_flags, migratetype, alloc_flags);
goto out;
}
}
-
--[[linux-5.15/page]]
--[[linux-5.15/likely()]]
--[[linux-5.15/pcp_allowed_order()]]
--[[linux-5.15/IS_ENABLED()]]
--[[linux-5.15/rmqueue_pcplist()]]
/*
* We most definitely don't want callers attempting to
* allocate greater than order-1 page units with __GFP_...
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
spin_lock_irqsave(&zone->lock, flags);
-
--[[linux-5.15/WARN_ON_ONCE()]]
--[[linux-5.15/spin_lock_irqsave()]]
do {
page = NULL;
/*
* order-0 request can reach here when the pcplist is ...
* due to non-CMA allocation context. HIGHATOMIC area is
* reserved for high-order atomic allocation, so order-0
* request should skip it.
*/
if (order > 0 && alloc_flags & ALLOC_HARDER) {
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHAT...
if (page)
trace_mm_page_alloc_zone_locked(page, order, migrate...
}
if (!page)
page = __rmqueue(zone, order, migratetype, alloc_flag...
} while (page && check_new_pages(page, order));
if (!page)
goto failed;
-
--[[linux-5.15/__rmqueue_smallest()]]
--[[linux-5.15/trace_mm_page_alloc_zone_locked()]]
--[[linux-5.15/__rmqueue()]]
--[[linux-5.15/check_new_pages()]]
__mod_zone_freepage_state(zone, -(1 << order),
get_pcppage_migratetype(page));
spin_unlock_irqrestore(&zone->lock, flags);
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 <<...
zone_statistics(preferred_zone, zone, 1);
-
--[[linux-5.15/__mod_zone_freepage_state()]]
--[[linux-5.15/get_pcppage_migratetype()]]
--[[linux-5.15/spin_unlock_irqrestore()]]
--[[linux-5.15/__count_zid_vm_events()]]
--[[linux-5.15/page_zonenum()]]
--[[linux-5.15/zone_statistics()]]
out:
/* Separate test+clear to avoid unnecessary atomics */
if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
wakeup_kswapd(zone, 0, 0, zone_idx(zone));
}
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
-ゾーンのwatermark boostが設定されていたら、フラグをクリアしてkswapdを起こす。
-ZONE_BOOSTED_WATERMARKを設定する箇所は、今のところsteal_...
--[[linux-5.15/test_bit()]]
--[[linux-5.15/clear_bit()]]
--[[linux-5.15/wakeup_kswapd()]]
--[[linux-5.15/zone_idx()]]
--[[linux-5.15/VM_BUG_ON_PAGE()]]
--[[linux-5.15/bad_range()]]
--[[linux-5.15/steal_suitable_fallback()]]
failed:
spin_unlock_irqrestore(&zone->lock, flags);
return NULL;
}
-
--[[linux-5.15/spin_unlock_irqrestore()]]
*コメント [#tdf69a16]
終了行:
*参照元 [#y99b61f7]
#backlinks
*説明 [#cd36dd34]
-パス: [[linux-5.15/mm/page_alloc.c]]
-指定されたゾーンからページを1つ確保する関数。
--可能ならper-CPUページリスト(pcplist)から確保し、それ以外はゾーンのロックを取ってフリーリストから確保する。
**引数 [#u34e2f23]
-struct zone *preferred_zone
--
--[[linux-5.15/zone]]
-struct zone *zone
--
--[[linux-5.15/zone]]
-unsigned int order
--
-gfp_t gfp_flags
--
--[[linux-5.15/gfp_t]]
-unsigned int alloc_flags
--
-int migratetype
--
**返り値 [#oecaaeca]
-struct page *
--
--[[linux-5.15/page]]
**参考 [#o5c9f81d]
*実装 [#c2edd239]
/*
* Allocate a page from the given zone. Use pcplists for...
*/
static inline
struct page *rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
gfp_t gfp_flags, unsigned int alloc_flags,
int migratetype)
{
unsigned long flags;
struct page *page;
if (likely(pcp_allowed_order(order))) {
/*
* MIGRATE_MOVABLE pcplist could have the pages on CMA...
* we need to skip it when CMA area isn't allowed.
*/
if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA...
migratetype != MIGRATE_MOVABLE) {
page = rmqueue_pcplist(preferred_zone, zone, order,
gfp_flags, migratetype, alloc_flags);
goto out;
}
}
-
--[[linux-5.15/page]]
--[[linux-5.15/likely()]]
--[[linux-5.15/pcp_allowed_order()]]
--[[linux-5.15/IS_ENABLED()]]
--[[linux-5.15/rmqueue_pcplist()]]
/*
* We most definitely don't want callers attempting to
* allocate greater than order-1 page units with __GFP_...
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
spin_lock_irqsave(&zone->lock, flags);
-
--[[linux-5.15/WARN_ON_ONCE()]]
--[[linux-5.15/spin_lock_irqsave()]]
do {
page = NULL;
/*
* order-0 request can reach here when the pcplist is ...
* due to non-CMA allocation context. HIGHATOMIC area is
* reserved for high-order atomic allocation, so order-0
* request should skip it.
*/
if (order > 0 && alloc_flags & ALLOC_HARDER) {
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHAT...
if (page)
trace_mm_page_alloc_zone_locked(page, order, migrate...
}
if (!page)
page = __rmqueue(zone, order, migratetype, alloc_flag...
} while (page && check_new_pages(page, order));
if (!page)
goto failed;
-
--[[linux-5.15/__rmqueue_smallest()]]
--[[linux-5.15/trace_mm_page_alloc_zone_locked()]]
--[[linux-5.15/__rmqueue()]]
--[[linux-5.15/check_new_pages()]]
__mod_zone_freepage_state(zone, -(1 << order),
get_pcppage_migratetype(page));
spin_unlock_irqrestore(&zone->lock, flags);
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 <<...
zone_statistics(preferred_zone, zone, 1);
-
--[[linux-5.15/__mod_zone_freepage_state()]]
--[[linux-5.15/get_pcppage_migratetype()]]
--[[linux-5.15/spin_unlock_irqrestore()]]
--[[linux-5.15/__count_zid_vm_events()]]
--[[linux-5.15/page_zonenum()]]
--[[linux-5.15/zone_statistics()]]
out:
/* Separate test+clear to avoid unnecessary atomics */
if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
wakeup_kswapd(zone, 0, 0, zone_idx(zone));
}
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
-ゾーンのwatermark boostが設定されていたら、フラグをクリアしてkswapdを起こす。
-ZONE_BOOSTED_WATERMARKを設定する箇所は、今のところsteal_...
--[[linux-5.15/test_bit()]]
--[[linux-5.15/clear_bit()]]
--[[linux-5.15/wakeup_kswapd()]]
--[[linux-5.15/zone_idx()]]
--[[linux-5.15/VM_BUG_ON_PAGE()]]
--[[linux-5.15/bad_range()]]
--[[linux-5.15/steal_suitable_fallback()]]
failed:
spin_unlock_irqrestore(&zone->lock, flags);
return NULL;
}
-
--[[linux-5.15/spin_unlock_irqrestore()]]
*コメント [#tdf69a16]
ページ名: