*参照元 [#r9396f1b]
#backlinks

*説明 [#m3b6c6a3]
-パス: [[linux-4.4.1/mm/page_alloc.c]]
-バディアロケータから指定した個数のページブロックをまとめて取得し、指定されたリストに追加する関数。
--効率のため、zone->lock の取得・解放を一度だけ行い、その間に複数回 __rmqueue() を呼び出す。

**引数 [#s63d58ab]
-struct zone *zone
--ページの取得元となるゾーン。
--[[linux-4.4.1/zone]]
-unsigned int order
--取得する各ページブロックのオーダー。
-unsigned long count
--取得を試みるページブロックの個数(最大値)。
-struct list_head *list
--取得したページを追加するリスト。
--[[linux-4.4.1/list_head]]
-int migratetype
--要求するマイグレーションタイプ。
-bool cold
--真ならコールドページとしてリスト末尾に追加、偽ならリスト先頭に追加する。
--[[linux-4.4.1/cold]]

**返り値 [#fd50a6cc]
-int
--実際にリストに追加できたページブロックの個数。

**参考 [#g6660778]

*実装 [#zb8d4b8e]
 /*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency. Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, bool cold)
 {
 	int i;
 
 	spin_lock(&zone->lock);
-
--[[linux-4.4.1/spin_lock()]]

 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype, 0);
 		if (unlikely(page == NULL))
 			break;
-
--[[linux-4.4.1/page]]
--[[linux-4.4.1/__rmqueue()]]
--[[linux-4.4.1/unlikely()]]

 		/*
 		 * Split buddy pages returned by expand() are received here
 		 * in physical page order. The page is added to the callers and
 		 * list and the list head then moves forward. From the callers
 		 * perspective, the linked list is ordered by page number in
 		 * some conditions. This is useful for IO devices that can
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
 		if (likely(!cold))
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
-
--[[linux-4.4.1/likely()]]
--[[linux-4.4.1/list_add()]]
--[[linux-4.4.1/list_add_tail()]]

 		list = &page->lru;
 		if (is_migrate_cma(get_pcppage_migratetype(page)))
 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
 					      -(1 << order));
-
--[[linux-4.4.1/is_migrate_cma()]]
--[[linux-4.4.1/get_pcppage_migratetype()]]
--[[linux-4.4.1/__mod_zone_page_state()]]

 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
-
--[[linux-4.4.1/__mod_zone_page_state()]]
--[[linux-4.4.1/spin_unlock()]]

 	return i;
 }

*コメント [#sb217229]