linux-4.4.1/ion_buffer_create()
*Referenced from [#w9505546]
#backlinks
*Description [#k5f4c15e]
-Path: [[linux-4.4.1/drivers/staging/android/ion/ion.c]]
-Allocates and initializes a new struct ion_buffer: backing memory is obtained from the given heap via heap->ops->allocate(), mapped into an sg_table via heap->ops->map_dma(), and the finished buffer is registered with the ION device (see the field sketch below).
--Per the comment in the source, this function must only be called while dev->lock is held.
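-For orientation, the sketch below lists the struct ion_buffer fields that this function fills in. It is a trimmed, illustrative excerpt, not the full definition; the complete type (with additional bookkeeping fields) lives in ion_priv.h, see [[linux-4.4.1/ion_buffer]].
 /* Trimmed sketch of struct ion_buffer, limited to the fields that
  * ion_buffer_create() initializes; see ion_priv.h for the full type. */
 struct ion_buffer {
 	struct kref ref;            /* kref_init(&buffer->ref)                      */
 	struct rb_node node;        /* linked into dev->buffers by ion_buffer_add() */
 	struct ion_device *dev;     /* buffer->dev = dev                            */
 	struct ion_heap *heap;      /* buffer->heap = heap                          */
 	unsigned long flags;        /* buffer->flags = flags                        */
 	size_t size;                /* buffer->size = len                           */
 	struct mutex lock;          /* mutex_init(&buffer->lock)                    */
 	struct sg_table *sg_table;  /* result of heap->ops->map_dma()               */
 	struct page **pages;        /* only built for faulted user mappings         */
 	struct list_head vmas;      /* INIT_LIST_HEAD(&buffer->vmas)                */
 };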
**Arguments [#ecbf3c1a]
-struct ion_heap *heap
--The heap to allocate the backing memory from.
--[[linux-4.4.1/ion_heap]]
-struct ion_device *dev
--The ION device the new buffer is registered with.
--[[linux-4.4.1/ion_device]]
-unsigned long len
--Requested buffer size in bytes (stored in buffer->size).
-unsigned long align
--Requested alignment, passed through to the heap's allocate operation.
-unsigned long flags
--Allocation flags, stored in buffer->flags.
**Return value [#x32d0f70]
-struct ion_buffer *
--Pointer to the newly created buffer on success, or an ERR_PTR()-encoded error code (-ENOMEM, -EINVAL, or the heap's own error) on failure; see the calling sketch below.
--[[linux-4.4.1/ion_buffer]]
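-Because failures are reported as ERR_PTR() values rather than NULL, a caller must test the result with IS_ERR(). A minimal calling sketch, loosely modeled on how ion_alloc() in the same file uses this function (simplified, not a verbatim excerpt):
 /* Hypothetical caller sketch: allocate a buffer and decode the error
  * pointer. The source comment requires dev->lock to be held here. */
 struct ion_buffer *buffer;
 buffer = ion_buffer_create(heap, dev, len, align, flags);
 if (IS_ERR(buffer))
 	return PTR_ERR(buffer);	/* e.g. -ENOMEM or -EINVAL */
 /* on success the buffer is registered with dev and its sglist is DMA-ready */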
**See also [#za01f3b9]
*Implementation [#ha64a526]
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 				     struct ion_device *dev,
 				     unsigned long len,
 				     unsigned long align,
 				     unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
struct scatterlist *sg;
int i, ret;
-
--[[linux-4.4.1/ion_buffer]]
--[[linux-4.4.1/sg_table]]
--[[linux-4.4.1/scatterlist]]
 	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
-
--[[linux-4.4.1/kzalloc()]]
--[[linux-4.4.1/GFP_KERNEL]]
--[[linux-4.4.1/ERR_PTR()]]
buffer->heap = heap;
buffer->flags = flags;
kref_init(&buffer->ref);
-
--[[linux-4.4.1/kref_init()]]
 	ret = heap->ops->allocate(heap, buffer, len, align, flags);
-
-heap->ops is of type struct ion_heap_ops *; the heap-specific allocate/free and DMA mapping callbacks are dispatched through it (a trimmed sketch follows this list).
--[[linux-4.4.1/ion_heap_ops]]
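-For reference, the heap callbacks that ion_buffer_create() relies on are declared in struct ion_heap_ops (ion_priv.h). The sketch below is trimmed to the four callbacks used in this function; the real structure also declares map_kernel/unmap_kernel, map_user and related operations.
 /* Trimmed sketch of struct ion_heap_ops, reduced to the callbacks used
  * by ion_buffer_create(); consult ion_priv.h for the full set. */
 struct ion_heap_ops {
 	int (*allocate)(struct ion_heap *heap, struct ion_buffer *buffer,
 			unsigned long len, unsigned long align,
 			unsigned long flags);
 	void (*free)(struct ion_buffer *buffer);
 	struct sg_table * (*map_dma)(struct ion_heap *heap,
 				     struct ion_buffer *buffer);
 	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
 	/* ... map_kernel, unmap_kernel, map_user, shrink, ... */
 };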
if (ret) {
 		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
ion_heap_freelist_drain(heap, 0);
 		ret = heap->ops->allocate(heap, buffer, len, align,
flags);
if (ret)
goto err2;
}
-
--[[linux-4.4.1/ion_heap_freelist_drain()]]
buffer->dev = dev;
buffer->size = len;
table = heap->ops->map_dma(heap, buffer);
if (WARN_ONCE(table == NULL,
"heap->ops->map_dma should retur...
table = ERR_PTR(-EINVAL);
if (IS_ERR(table)) {
ret = -EINVAL;
goto err1;
}
-
--[[linux-4.4.1/WARN_ONCE()]]
--[[linux-4.4.1/IS_ERR()]]
buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
-
--[[linux-4.4.1/ion_buffer_fault_user_mappings()]]
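-The page array below is only built when user mappings of this buffer are faulted in page by page. As far as I can tell from the same file, the predicate boils down to "CPU-cached and not using explicit sync"; a rough sketch (paraphrased, not a verbatim excerpt):
 /* Rough sketch of ion_buffer_fault_user_mappings(): cached buffers that
  * do not request explicit sync fault their user mappings on demand. */
 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
 {
 	return (buffer->flags & ION_FLAG_CACHED) &&
 		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
 }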
 		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct scatterlist *sg;
int i, j, k = 0;
-
--[[linux-4.4.1/PAGE_ALIGN()]]
--[[linux-4.4.1/PAGE_SIZE]]
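-A quick worked example of the num_pages arithmetic, assuming a typical (but architecture-dependent) 4 KiB PAGE_SIZE:
 /* Assuming PAGE_SIZE == 4096:
  *   buffer->size     = 5000
  *   PAGE_ALIGN(5000) = 8192            (rounded up to a page multiple)
  *   num_pages        = 8192 / 4096 = 2
  * so buffer->pages below gets room for one struct page * per covered page. */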
 		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
goto err;
}
-
--[[linux-4.4.1/page]]
--[[linux-4.4.1/vmalloc()]]
 		for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
 			for (j = 0; j < sg->length / PAGE_SIZE; j++)
 				buffer->pages[k++] = page++;
}
-
--[[linux-4.4.1/for_each_sg()]]
--[[linux-4.4.1/sg_page()]]
}
buffer->dev = dev;
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
-
--[[linux-4.4.1/INIT_LIST_HEAD()]]
--[[linux-4.4.1/mutex_init()]]
 	/*
 	 * this will set up dma addresses for the sglist -- it is not
 	 * technically correct as per the dma api -- a specific
 	 * device isn't really taking ownership here.  However, in practice on
 	 * our systems the only dma_address space is physical addresses.
 	 * Additionally, we can't afford the overhead of invalidating every
 	 * allocation via dma_map_sg. The implicit contract here is that
 	 * memory coming from the heaps is ready for dma, ie if it has a
 	 * cached mapping that mapping has been invalidated
 	 */
 	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
sg_dma_address(sg) = sg_phys(sg);
-
--[[linux-4.4.1/sg_dma_address()]]
--[[linux-4.4.1/sg_phys()]]
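-The block comment above explains why the code aliases each segment's dma_address to its physical address instead of going through the DMA API. For contrast, a by-the-book mapping of the same sg_table would look roughly like the sketch below; some_device is a hypothetical placeholder for the struct device that would own the mapping, and ION deliberately avoids this path because of the per-allocation cache-invalidation cost.
 /* Hypothetical sketch for contrast only: mapping the sglist through the
  * DMA API. some_device is a placeholder; ION does not do this here. */
 int nents = dma_map_sg(some_device, buffer->sg_table->sgl,
 		       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
 if (nents == 0)
 	return -ENOMEM;	/* mapping failed */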
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
return buffer;
-
--[[linux-4.4.1/mutex_lock()]]
--[[linux-4.4.1/ion_buffer_add()]]
--[[linux-4.4.1/mutex_unlock()]]
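-ion_buffer_add() registers the new buffer with the device while dev->buffer_lock is held. As I read the surrounding code, it inserts the buffer into the dev->buffers red-black tree ordered by the buffer's address; a paraphrased sketch (not a verbatim excerpt):
 /* Paraphrased sketch of ion_buffer_add(): insert the buffer into the
  * device's rbtree of live buffers, ordered by pointer value. */
 struct rb_node **p = &dev->buffers.rb_node;
 struct rb_node *parent = NULL;
 struct ion_buffer *entry;
 while (*p) {
 	parent = *p;
 	entry = rb_entry(parent, struct ion_buffer, node);
 	if (buffer < entry)
 		p = &(*p)->rb_left;
 	else
 		p = &(*p)->rb_right;
 }
 rb_link_node(&buffer->node, parent, p);
 rb_insert_color(&buffer->node, &dev->buffers);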
err:
heap->ops->unmap_dma(heap, buffer);
err1:
heap->ops->free(buffer);
err2:
kfree(buffer);
return ERR_PTR(ret);
-
--[[linux-4.4.1/kfree()]]
}
*Comments [#nc2855c6]