#author("2025-09-13T02:00:27+09:00","default:guest","guest") *参照元 [#x398a6b5] #backlinks *説明 [#mb6f287e] -パス: [[linux-5.15/mm/page_alloc.c]] -FIXME: これは何? --説明 **引数 [#y5f87496] -struct zone *zone -- --[[linux-5.15/zone]] -unsigned int order -- -int migratetype -- -unsigned int alloc_flags -- **返り値 [#y423ef66] -struct page * -- --[[linux-5.15/page]] **参考 [#s986cab1] *実装 [#sc8c51f0] /* * Do the hard work of removing an element from the buddy allocator. * Call me with the zone->lock already held. */ static __always_inline struct page * __rmqueue(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags) { struct page *page; if (IS_ENABLED(CONFIG_CMA)) { /* * Balance movable allocations between regular and CMA areas by * allocating from CMA when over half of the zone's free memory * is in the CMA area. */ if (alloc_flags & ALLOC_CMA && zone_page_state(zone, NR_FREE_CMA_PAGES) > zone_page_state(zone, NR_FREE_PAGES) / 2) { page = __rmqueue_cma_fallback(zone, order); if (page) goto out; } } - --[[linux-5.15/IS_ENABLED()]] --[[linux-5.15/zone_page_state()]] --[[linux-5.15/__rmqueue_cmd_fallback()]] retry: page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page)) { if (alloc_flags & ALLOC_CMA) page = __rmqueue_cma_fallback(zone, order); if (!page && __rmqueue_fallback(zone, order, migratetype, alloc_flags)) goto retry; } - --[[linux-5.15/__rmqueue_smallest()]] --[[linux-5.15/unlikely()]] --[[linux-5.15/__rmqueue_cma_fallback()]] --[[linux-5.15/__rmqueue_fallback()]] out: if (page) trace_mm_page_alloc_zone_locked(page, order, migratetype); return page; } - --[[linux-5.15/trace_mm_page_alloc_zone_locked()]] *コメント [#r019216b]