*Referenced from [#u26fa5e1]
#backlinks

*Description [#s33392ff]
-Path: more than one
--CONFIG_MMU enabled: [[linux-4.4.1/mm/mmap.c]]
--CONFIG_MMU disabled: [[linux-4.4.1/mm/nommu.c]]
-Creates a new memory mapping in the current process's address space. This is the common backend of the mmap() family of calls; the caller must hold down_write(&current->mm->mmap_sem).

**Arguments [#md56d548]
-struct file *file
--file to map, or NULL for an anonymous mapping
--[[linux-4.4.1/file]]
-unsigned long addr
--hint for the start address of the mapping
-unsigned long len
--length of the mapping in bytes
-unsigned long prot
--protection flags (PROT_READ, PROT_WRITE, PROT_EXEC, ...)
-unsigned long flags
--mapping flags (MAP_SHARED, MAP_PRIVATE, MAP_FIXED, ...)
-vm_flags_t vm_flags
--additional VM_* flags to set on the mapping
--[[linux-4.4.1/vm_flags_t]]
-unsigned long pgoff
--offset into the file, in pages
-unsigned long *populate
--output: set to the length that should be pre-faulted (MAP_POPULATE or VM_LOCKED), otherwise 0

**Return value [#a7f2941d]
-unsigned long
--start address of the new mapping on success, or a negative error code (check with IS_ERR_VALUE())

**References [#dd70ae03]

*Implementation [#b3e8556d]
**CONFIG_MMU enabled: mm/mmap.c [#r7ba5410]
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
                         unsigned long len, unsigned long prot,
                         unsigned long flags, vm_flags_t vm_flags,
                         unsigned long pgoff, unsigned long *populate)
 {
         struct mm_struct *mm = current->mm;
-
--[[linux-4.4.1/mm_struct]]
--[[linux-4.4.1/current(global)]]

         *populate = 0;
 
         if (!len)
                 return -EINVAL;
 
         /*
          * Does the application expect PROT_READ to imply PROT_EXEC?
          *
          * (the exception is when the underlying filesystem is noexec
          *  mounted, in which case we dont add PROT_EXEC.)
          */
         if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                 if (!(file && path_noexec(&file->f_path)))
                         prot |= PROT_EXEC;
-
--[[linux-4.4.1/PROT_READ]]
--[[linux-4.4.1/path_noexec()]]
--[[linux-4.4.1/PROT_EXEC]]

         if (!(flags & MAP_FIXED))
                 addr = round_hint_to_min(addr);
-
--[[linux-4.4.1/MAP_FIXED]]
--[[linux-4.4.1/round_hint_to_min()]]

         /* Careful about overflows.. */
         len = PAGE_ALIGN(len);
         if (!len)
                 return -ENOMEM;
-
--[[linux-4.4.1/PAGE_ALIGN()]]

         /* offset overflow? */
         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                 return -EOVERFLOW;
 
         /* Too many mappings? */
         if (mm->map_count > sysctl_max_map_count)
                 return -ENOMEM;
 
         /* Obtain the address to map to. we verify (or select) it and ensure
          * that it represents a valid section of the address space.
          */
         addr = get_unmapped_area(file, addr, len, pgoff, flags);
         if (offset_in_page(addr))
                 return addr;
-
--[[linux-4.4.1/get_unmapped_area()]]
--[[linux-4.4.1/offset_in_page()]]

         /* Do simple checking here so the lower-level routines won't have
          * to. we assume access permissions have been handled by the open
          * of the memory object, so we don't do any here.
          */
         vm_flags |= calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
-
--[[linux-4.4.1/calc_vm_prot_bits()]]
--[[linux-4.4.1/calc_vm_flag_bits()]]
--[[linux-4.4.1/VM_MAYREAD]]
--[[linux-4.4.1/VM_MAYWRITE]]
--[[linux-4.4.1/VM_MAYEXEC]]

         if (flags & MAP_LOCKED)
                 if (!can_do_mlock())
                         return -EPERM;
-
--[[linux-4.4.1/MAP_LOCKED]]
--[[linux-4.4.1/can_do_mlock()]]

         if (mlock_future_check(mm, vm_flags, len))
                 return -EAGAIN;
-
--[[linux-4.4.1/mlock_future_check()]]
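
An aside on the MAP_LOCKED checks just above (not part of the quoted kernel source): can_do_mlock() requires either CAP_IPC_LOCK or a non-zero RLIMIT_MEMLOCK, and mlock_future_check() then verifies that the requested length still fits within that limit. A minimal userspace sketch that exercises these checks, assuming an unprivileged process:

 /* Illustration only, not kernel code: with RLIMIT_MEMLOCK lowered to 0 and
  * no CAP_IPC_LOCK, the MAP_LOCKED request below is expected to fail with
  * EPERM (from can_do_mlock()) instead of returning a mapping. */
 #define _DEFAULT_SOURCE
 #include <errno.h>
 #include <stdio.h>
 #include <string.h>
 #include <sys/mman.h>
 #include <sys/resource.h>
 
 int main(void)
 {
         struct rlimit rl = { 0, 0 };
         setrlimit(RLIMIT_MEMLOCK, &rl);       /* forbid locked memory */
 
         void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
         if (p == MAP_FAILED)
                 printf("mmap(MAP_LOCKED): %s\n", strerror(errno));
         else
                 puts("mapped (privileged, or the limit was high enough)");
         return 0;
 }

The quoted do_mmap() listing continues below with the file-backed versus anonymous mapping checks.
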
         if (file) {
                 struct inode *inode = file_inode(file);
-
--[[linux-4.4.1/inode]]
--[[linux-4.4.1/file_inode()]]

                 switch (flags & MAP_TYPE) {
                 case MAP_SHARED:
                         if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
                                 return -EACCES;
 
                         /*
                          * Make sure we don't allow writing to an append-only
                          * file..
                          */
                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                 return -EACCES;
-
--[[linux-4.4.1/IS_APPEND()]]

                         /*
                          * Make sure there are no mandatory locks on the file.
                          */
                         if (locks_verify_locked(file))
                                 return -EAGAIN;
-
--[[linux-4.4.1/locks_verify_locked()]]

                         vm_flags |= VM_SHARED | VM_MAYSHARE;
                         if (!(file->f_mode & FMODE_WRITE))
                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
 
                         /* fall through */
                 case MAP_PRIVATE:
                         if (!(file->f_mode & FMODE_READ))
                                 return -EACCES;
                         if (path_noexec(&file->f_path)) {
                                 if (vm_flags & VM_EXEC)
                                         return -EPERM;
                                 vm_flags &= ~VM_MAYEXEC;
                         }
 
                         if (!file->f_op->mmap)
                                 return -ENODEV;
                         if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                                 return -EINVAL;
                         break;
 
                 default:
                         return -EINVAL;
                 }
         } else {
                 switch (flags & MAP_TYPE) {
                 case MAP_SHARED:
                         if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                                 return -EINVAL;
                         /*
                          * Ignore pgoff.
                          */
                         pgoff = 0;
                         vm_flags |= VM_SHARED | VM_MAYSHARE;
                         break;
                 case MAP_PRIVATE:
                         /*
                          * Set pgoff according to addr for anon_vma.
                          */
                         pgoff = addr >> PAGE_SHIFT;
                         break;
                 default:
                         return -EINVAL;
                 }
         }
 
         /*
          * Set 'VM_NORESERVE' if we should not account for the
          * memory use of this mapping.
          */
         if (flags & MAP_NORESERVE) {
                 /* We honor MAP_NORESERVE if allowed to overcommit */
                 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                         vm_flags |= VM_NORESERVE;
-
--[[linux-4.4.1/sysctl_overcommit_memory(global)]]
--[[linux-4.4.1/OVERCOMMIT_NEVER]]
--[[linux-4.4.1/VM_NORESERVE]]

                 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
                 if (file && is_file_hugepages(file))
                         vm_flags |= VM_NORESERVE;
         }
 
         addr = mmap_region(file, addr, len, vm_flags, pgoff);
-
--[[linux-4.4.1/mmap_region()]]

         if (!IS_ERR_VALUE(addr) &&
             ((vm_flags & VM_LOCKED) ||
              (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
                 *populate = len;
         return addr;
 }
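
Before moving on to the no-MMU variant, a small userspace sketch (illustration only, not from the kernel tree) of the access check in the MAP_SHARED case above: a file opened O_RDONLY has no FMODE_WRITE, so requesting PROT_WRITE together with MAP_SHARED is rejected with EACCES, while the same request with MAP_PRIVATE is allowed because a private mapping only needs FMODE_READ. The path /etc/hostname is just an arbitrary readable file for the demonstration:

 /* Illustration only: MAP_SHARED vs MAP_PRIVATE on a read-only descriptor. */
 #include <errno.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <string.h>
 #include <sys/mman.h>
 
 static void try_map(int fd, int flags, const char *name)
 {
         void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, flags, fd, 0);
         if (p == MAP_FAILED)
                 printf("%-11s -> %s\n", name, strerror(errno));
         else
                 printf("%-11s -> mapped at %p\n", name, p);
 }
 
 int main(void)
 {
         int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */
         if (fd < 0)
                 return 1;
         try_map(fd, MAP_PRIVATE, "MAP_PRIVATE");    /* copy-on-write, allowed */
         try_map(fd, MAP_SHARED,  "MAP_SHARED");     /* -EACCES from do_mmap() */
         return 0;
 }
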
**CONFIG_MMU disabled: mm/nommu.c [#n2c5c527]
 /*
  * handle mapping creation for uClinux
  */
 unsigned long do_mmap(struct file *file,
                         unsigned long addr,
                         unsigned long len,
                         unsigned long prot,
                         unsigned long flags,
                         vm_flags_t vm_flags,
                         unsigned long pgoff,
                         unsigned long *populate)
 {
         struct vm_area_struct *vma;
         struct vm_region *region;
         struct rb_node *rb;
         unsigned long capabilities, result;
         int ret;
-
--[[linux-4.4.1/vm_area_struct]]
--[[linux-4.4.1/vm_region]]
--[[linux-4.4.1/rb_node]]

         *populate = 0;
 
         /* decide whether we should attempt the mapping, and if so what sort of
          * mapping */
         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                     &capabilities);
         if (ret < 0)
                 return ret;
-
--[[linux-4.4.1/validate_mmap_request()]]

         /* we ignore the address hint */
         addr = 0;
         len = PAGE_ALIGN(len);
-
--[[linux-4.4.1/PAGE_ALIGN()]]

         /* we've determined that we can make the mapping, now translate what we
          * now know into VMA flags */
         vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
-
--[[linux-4.4.1/determine_vm_flags()]]

         /* we're going to need to record the mapping */
         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
         if (!region)
                 goto error_getting_region;
 
         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
         if (!vma)
                 goto error_getting_vma;
-
--[[linux-4.4.1/kmem_cache_zalloc()]]
--[[linux-4.4.1/GFP_KERNEL]]
--[[linux-4.4.1/vm_region_jar(global)]]
--[[linux-4.4.1/vm_area_cachep(global)]]

         region->vm_usage = 1;
         region->vm_flags = vm_flags;
         region->vm_pgoff = pgoff;
 
         INIT_LIST_HEAD(&vma->anon_vma_chain);
         vma->vm_flags = vm_flags;
         vma->vm_pgoff = pgoff;
 
         if (file) {
                 region->vm_file = get_file(file);
                 vma->vm_file = get_file(file);
         }
-
--[[linux-4.4.1/INIT_LIST_HEAD()]]
--[[linux-4.4.1/get_file()]]

         down_write(&nommu_region_sem);
-
--[[linux-4.4.1/down_write()]]

         /* if we want to share, we need to check for regions created by other
          * mmap() calls that overlap with our proposed mapping
          * - we can only share with a superset match on most regular files
          * - shared mappings on character devices and memory backed files are
          *   permitted to overlap inexactly as far as we are concerned for in
          *   these cases, sharing is handled in the driver or filesystem rather
          *   than here
          */
         if (vm_flags & VM_MAYSHARE) {
                 struct vm_region *pregion;
                 unsigned long pglen, rpglen, pgend, rpgend, start;
-
--[[linux-4.4.1/VM_MAYSHARE]]
--[[linux-4.4.1/vm_region]]

                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                 pgend = pgoff + pglen;
-
--[[linux-4.4.1/PAGE_SHIFT]]

                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
                         pregion = rb_entry(rb, struct vm_region, vm_rb);
-
--[[linux-4.4.1/rb_first()]]
--[[linux-4.4.1/nommu_region_tree(global)]]
--[[linux-4.4.1/rb_next()]]
--[[linux-4.4.1/rb_entry()]]
--[[linux-4.4.1/vm_region]]

                         if (!(pregion->vm_flags & VM_MAYSHARE))
                                 continue;
 
                         /* search for overlapping mappings on the same file */
                         if (file_inode(pregion->vm_file) != file_inode(file))
                                 continue;
-
--[[linux-4.4.1/file_inode()]]

                         if (pregion->vm_pgoff >= pgend)
                                 continue;
 
                         rpglen = pregion->vm_end - pregion->vm_start;
                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
                         rpgend = pregion->vm_pgoff + rpglen;
                         if (pgoff >= rpgend)
                                 continue;
 
                         /* handle inexactly overlapping matches between
                          * mappings */
                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
                                 /* new mapping is not a subset of the region */
                                 if (!(capabilities & NOMMU_MAP_DIRECT))
                                         goto sharing_violation;
                                 continue;
                         }
 
                         /* we've found a region we can share */
                         pregion->vm_usage++;
                         vma->vm_region = pregion;
                         start = pregion->vm_start;
                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
                         vma->vm_start = start;
                         vma->vm_end = start + len;
 
                         if (pregion->vm_flags & VM_MAPPED_COPY)
                                 vma->vm_flags |= VM_MAPPED_COPY;
                         else {
                                 ret = do_mmap_shared_file(vma);
                                 if (ret < 0) {
                                         vma->vm_region = NULL;
                                         vma->vm_start = 0;
                                         vma->vm_end = 0;
                                         pregion->vm_usage--;
                                         pregion = NULL;
                                         goto error_just_free;
                                 }
                         }
-
--[[linux-4.4.1/do_mmap_shared_file()]]

                         fput(region->vm_file);
                         kmem_cache_free(vm_region_jar, region);
                         region = pregion;
                         result = start;
                         goto share;
-
--[[linux-4.4.1/fput()]]
--[[linux-4.4.1/kmem_cache_free()]]
--[[linux-4.4.1/vm_region_jar(global)]]

                 }
 
                 /* obtain the address at which to make a shared mapping
                  * - this is the hook for quasi-memory character devices to
                  *   tell us the location of a shared mapping
                  */
                 if (capabilities & NOMMU_MAP_DIRECT) {
                         addr = file->f_op->get_unmapped_area(file, addr, len,
                                                              pgoff, flags);
-file->f_op is of type const struct file_operations *
--[[linux-4.4.1/file_operations]]

                         if (IS_ERR_VALUE(addr)) {
-
--[[linux-4.4.1/IS_ERR_VALUE()]]

                                 ret = addr;
                                 if (ret != -ENOSYS)
                                         goto error_just_free;
 
                                 /* the driver refused to tell us where to site
                                  * the mapping so we'll have to attempt to copy
                                  * it */
                                 ret = -ENODEV;
                                 if (!(capabilities & NOMMU_MAP_COPY))
                                         goto error_just_free;
 
                                 capabilities &= ~NOMMU_MAP_DIRECT;
-
--[[linux-4.4.1/NOMMU_MAP_COPY]]
--[[linux-4.4.1/NOMMU_MAP_DIRECT]]

                         } else {
                                 vma->vm_start = region->vm_start = addr;
                                 vma->vm_end = region->vm_end = addr + len;
                         }
                 }
         }
 
         vma->vm_region = region;
 
         /* set up the mapping
          * - the region is filled in if NOMMU_MAP_DIRECT is still set
          */
         if (file && vma->vm_flags & VM_SHARED)
                 ret = do_mmap_shared_file(vma);
         else
                 ret = do_mmap_private(vma, region, len, capabilities);
         if (ret < 0)
                 goto error_just_free;
         add_nommu_region(region);
-
--[[linux-4.4.1/do_mmap_shared_file()]]
--[[linux-4.4.1/do_mmap_private()]]
--[[linux-4.4.1/add_nommu_region()]]
--[[linux-4.4.1/VM_SHARED]]

         /* clear anonymous mappings that don't ask for uninitialized data */
         if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
                 memset((void *)region->vm_start, 0,
                        region->vm_end - region->vm_start);
 
         /* okay... we have a mapping; now we have to register it */
         result = vma->vm_start;
 
         current->mm->total_vm += len >> PAGE_SHIFT;
-
--[[linux-4.4.1/current(global)]]
--[[linux-4.4.1/PAGE_SHIFT]]

 share:
         add_vma_to_mm(current->mm, vma);
-
--[[linux-4.4.1/add_vma_to_mm()]]

         /* we flush the region from the icache only when the first executable
          * mapping of it is made */
         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
                 flush_icache_range(region->vm_start, region->vm_end);
                 region->vm_icache_flushed = true;
         }
-
--[[linux-4.4.1/flush_icache_range()]]

         up_write(&nommu_region_sem);
-
--[[linux-4.4.1/up_write()]]
--[[linux-4.4.1/nommu_region_sem(global)]]

         return result;
 
 error_just_free:
         up_write(&nommu_region_sem);
 error:
         if (region->vm_file)
                 fput(region->vm_file);
         kmem_cache_free(vm_region_jar, region);
         if (vma->vm_file)
                 fput(vma->vm_file);
         kmem_cache_free(vm_area_cachep, vma);
         return ret;
-
--[[linux-4.4.1/fput()]]
--[[linux-4.4.1/kmem_cache_free()]]
--[[linux-4.4.1/vm_region_jar(global)]]
--[[linux-4.4.1/vm_area_cachep(global)]]

 sharing_violation:
         up_write(&nommu_region_sem);
         pr_warn("Attempt to share mismatched mappings\n");
         ret = -EINVAL;
         goto error;
-
--[[linux-4.4.1/pr_warn()]]

 error_getting_vma:
         kmem_cache_free(vm_region_jar, region);
         pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
                 len, current->pid);
         show_free_areas(0);
         return -ENOMEM;
-
--[[linux-4.4.1/show_free_areas()]]
--[[linux-4.4.1/kmem_cache_free()]]
--[[linux-4.4.1/vm_region_jar(global)]]

 error_getting_region:
         pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
                 len, current->pid);
         show_free_areas(0);
         return -ENOMEM;
 }
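
The region-sharing loop in the no-MMU variant above hinges on two page-range tests: a candidate vm_region is skipped when its page range does not overlap the request at all, shared directly when the request is a subset of it, and otherwise treated as a sharing violation unless the mapping can still be placed directly by the driver (NOMMU_MAP_DIRECT). A standalone sketch (not kernel code) of just that arithmetic, with made-up page numbers:

 /* Standalone sketch of the overlap/subset tests used by the VM_MAYSHARE
  * loop above; ranges are half-open page intervals [pgoff, pgoff + pglen). */
 #include <stdbool.h>
 #include <stdio.h>
 
 static bool overlaps(unsigned long pgoff, unsigned long pglen,
                      unsigned long rpgoff, unsigned long rpglen)
 {
         /* mirrors: skip when pregion->vm_pgoff >= pgend or pgoff >= rpgend */
         return rpgoff < pgoff + pglen && pgoff < rpgoff + rpglen;
 }
 
 static bool is_subset(unsigned long pgoff, unsigned long pglen,
                       unsigned long rpgoff, unsigned long rpglen)
 {
         /* mirrors: pgoff >= pregion->vm_pgoff && pgend <= rpgend */
         return pgoff >= rpgoff && pgoff + pglen <= rpgoff + rpglen;
 }
 
 int main(void)
 {
         /* request pages [2,5) against an existing region covering [0,8):
          * an overlapping subset, so the region can be shared */
         printf("overlap=%d subset=%d\n",
                overlaps(2, 3, 0, 8), is_subset(2, 3, 0, 8));
 
         /* request pages [6,10) against [0,8): overlaps but is not a subset,
          * so it is only tolerated when NOMMU_MAP_DIRECT is available */
         printf("overlap=%d subset=%d\n",
                overlaps(6, 4, 0, 8), is_subset(6, 4, 0, 8));
         return 0;
 }
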
*Comments [#m8ce7d4f]