#author("2025-10-24T12:42:27+09:00","default:guest","guest")

#author("2025-10-24T12:42:37+09:00","default:guest","guest")
*Referenced from [#s763da5f]
#backlinks

*Description [#m9bed659]
-Path: [[linux-5.15/]]
-Path: [[linux-5.15/mm/page_alloc.c]]

-What is this?
--The core entry point (the "heart") of the zoned buddy allocator. It allocates a block of 2^order contiguous physical pages according to the gfp flags and the node constraints, first trying the per-zone free lists directly and falling back to __alloc_pages_slowpath() (reclaim, compaction, OOM handling) when the fast path fails.
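
Most callers do not call __alloc_pages() directly but reach it through wrappers such as alloc_pages(). A minimal usage sketch (the helper name alloc_example is made up for illustration):

 #include <linux/gfp.h>
 #include <linux/mm.h>
 
 /* Sketch: allocate 2^2 = 4 contiguous pages through the alloc_pages()
  * wrapper, which ends up in __alloc_pages() with the local NUMA node as
  * preferred_nid and nodemask == NULL. */
 static void *alloc_example(void)
 {
 	struct page *page;
 
 	page = alloc_pages(GFP_KERNEL, 2);	/* may sleep and reclaim */
 	if (!page)
 		return NULL;
 
 	return page_address(page);	/* kernel virtual address of the block */
 }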


**Arguments [#j02f0876]
-gfp_t gfp
--GFP flags controlling the allocation: allowed zones, whether the caller may sleep, whether reclaim/IO/FS may be used, and so on.
-unsigned int order
--Order of the request: 2^order contiguous pages are allocated. Must be less than MAX_ORDER.
-int preferred_nid
--NUMA node to allocate from preferentially (callers typically pass numa_node_id() or a node chosen by a memory policy).
-nodemask_t *nodemask
--Set of NUMA nodes that may be used; if NULL, the nodes allowed by the current cpuset are used.
--[[linux-5.15/nodemask_t]]
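
A hedged sketch of how these arguments are typically filled in; this mirrors what the __alloc_pages_node() wrapper does:

 /* Sketch only: calling __alloc_pages() with explicit arguments, as the
  * __alloc_pages_node() wrapper does. nodemask == NULL lets the allocator
  * fall back to all nodes allowed by the current cpuset. */
 struct page *page = __alloc_pages(GFP_KERNEL,		/* gfp            */
 				   1,			/* order: 2 pages */
 				   numa_node_id(),	/* preferred_nid  */
 				   NULL);		/* nodemask       */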


**Return value [#yc175a6b]
-struct page *
--Pointer to the first struct page of the allocated 2^order block, or NULL on failure (out-of-range order, no memory could be found, or the memcg charge failed for a __GFP_ACCOUNT request).
--[[linux-5.15/page]]
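
A short sketch of handling the result (assumption: the caller frees with the same order it allocated with):

 /* Sketch: the result points at the first struct page of the block; it can
  * be converted to a PFN or a kernel virtual address, and must eventually
  * be released with __free_pages() using the same order. */
 struct page *page = alloc_pages(GFP_KERNEL, 3);	/* 8 pages */
 if (page) {
 	pr_info("allocated pfn %lu\n", page_to_pfn(page));
 	__free_pages(page, 3);
 }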


**References [#rbe7a4c6]


*Implementation [#t5db9103]

 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
 
 	/*
 	 * There are several places where we assume that the order value is sane
 	 * so bail out early if the request is out of bound.
 	 */
 	if (unlikely(order >= MAX_ORDER)) {
 		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
 		return NULL;
 	}
 
-
--[[linux-5.15/page]]
--[[linux-5.15/gfp_t]]
--[[linux-5.15/alloc_context]]
--[[linux-5.15/unlikely()]]
--[[linux-5.15/WARN_ON_ONCE()]]
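
The check above rejects requests of order MAX_ORDER or higher (the buddy free lists only go up to MAX_ORDER-1) and warns once unless __GFP_NOWARN is set. A hedged sketch of a request that hits this path:

 /* Sketch: with the default MAX_ORDER of 11 the largest valid request is
  * order 10 (4 MiB with 4 KiB pages); this one fails immediately and
  * __GFP_NOWARN suppresses the WARN_ON_ONCE(). */
 struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, MAX_ORDER);
 /* page == NULL here */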

 	gfp &= gfp_allowed_mask;
 	/*
 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
 	 * from a particular context which has been marked by
 	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
 	 * movable zones are not used during allocation.
 	 */
 	gfp = current_gfp_context(gfp);
 	alloc_gfp = gfp;
 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
 			&alloc_gfp, &alloc_flags))
 		return NULL;
 
-
--[[linux-5.15/current_gfp_context()]]
--[[linux-5.15/prepare_alloc_pages()]]
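
current_gfp_context() applies per-task scoped constraints set with memalloc_nofs_save()/memalloc_noio_save(), and prepare_alloc_pages() builds the alloc_context (zonelist, preferred zone, migratetype) for the fast path. A hedged sketch of the scoped-constraint part:

 #include <linux/sched/mm.h>
 
 /* Sketch: inside a memalloc_nofs_save()/restore() scope, current_gfp_context()
  * strips __GFP_FS from this GFP_KERNEL request, so the allocation cannot
  * recurse into filesystem reclaim. */
 unsigned int nofs_flags = memalloc_nofs_save();
 struct page *page = alloc_pages(GFP_KERNEL, 0);	/* behaves like GFP_NOFS here */
 memalloc_nofs_restore(nofs_flags);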

 	/*
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
 	if (likely(page))
 		goto out;
 
 	alloc_gfp = gfp;
 	ac.spread_dirty_pages = false;
 
-
--[[linux-5.15/alloc_flags_nofragment()]]
--[[linux-5.15/get_page_from_freelist()]]
--[[linux-5.15/likely()]]

 	/*
 	 * Restore the original nodemask if it was potentially replaced with
 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
 	 */
 	ac.nodemask = nodemask;
 
 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
 
 out:
 	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
 
-
--[[linux-5.15/__alloc_pages_slowpath()]]
--[[linux-5.15/memcg_kmem_enabled()]]
--[[linux-5.15/unlikely()]]
--[[linux-5.15/__memcg_kmem_charge_page()]]
--[[linux-5.15/__free_pages()]]
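
On the way out, pages allocated with __GFP_ACCOUNT are charged to the current memory cgroup when kernel memcg accounting is enabled; if the charge fails, the page is freed and NULL is returned. A hedged sketch of an accounted request:

 /* Sketch: GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT, so this page
  * is charged via __memcg_kmem_charge_page(); if the cgroup limit is hit,
  * the page is freed again and the caller sees NULL. */
 struct page *page = alloc_pages(GFP_KERNEL_ACCOUNT, 0);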

 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages);

-
--[[linux-5.15/trace_mm_page_alloc()]]
--[[linux-5.15/EXPORT_SYMBOL()]]
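
EXPORT_SYMBOL makes __alloc_pages() available to loadable modules (normally reached through the alloc_pages() wrappers), and trace_mm_page_alloc fires on every exit, so allocations can be observed at the kmem:mm_page_alloc tracepoint. A minimal module sketch (the demo_* names are made up for illustration):

 #include <linux/module.h>
 #include <linux/gfp.h>
 
 /* Sketch: a loadable module allocating and freeing one page; each
  * allocation is visible at the kmem:mm_page_alloc tracepoint. */
 static struct page *demo_page;
 
 static int __init demo_init(void)
 {
 	demo_page = alloc_pages(GFP_KERNEL, 0);
 	return demo_page ? 0 : -ENOMEM;
 }
 
 static void __exit demo_exit(void)
 {
 	if (demo_page)
 		__free_pages(demo_page, 0);
 }
 
 module_init(demo_init);
 module_exit(demo_exit);
 MODULE_LICENSE("GPL");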


*Comments [#g1472970]
