/*	$NetBSD: vm_machdep.c,v 1.5 2018/01/24 09:04:45 skrll Exp $	*/

/*-
 * Copyright (c) 2007 Jared D. McNeill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.5 2018/01/24 09:04:45 skrll Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
/* This code was originally stolen from the alpha port. */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;
	vm_prot_t prot;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;

	/* Remember the user virtual address; vunmapbuf() restores it. */
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);

	/* Allocate kernel virtual address space to alias the user pages. */
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);

	/*
	 * Enter a wired kernel mapping for each page backing the
	 * user buffer.  A B_READ transfer stores device data into
	 * the buffer, so it needs a writable mapping.
	 */
	len = atop(len);
	prot = bp->b_flags & B_READ ? VM_PROT_READ | VM_PROT_WRITE :
	    VM_PROT_READ;
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    prot, prot | PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	/* Remove the aliases, then give the kernel VA range back. */
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);

	/* Restore the user virtual address saved by vmapbuf(). */
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
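
/*
 * Illustrative sketch, not part of this port: how a caller in the
 * classic raw-I/O path pairs vmapbuf() with vunmapbuf().  As the
 * comment above vmapbuf() notes, the user pages are assumed to be
 * locked with uvm_vslock() and B_PHYS set on the buffer before this
 * runs.  The function name "example_raw_transfer" and the strategy
 * callback parameter are hypothetical names for this sketch only.
 */
static int __unused
example_raw_transfer(struct buf *bp, vsize_t todo,
    void (*strategy)(struct buf *))
{
	int error;

	/* Double-map the wired user pages into kernel virtual space. */
	error = vmapbuf(bp, todo);
	if (error != 0)
		return error;

	/* bp->b_data is now a kernel VA; start the transfer and wait. */
	(*strategy)(bp);
	error = biowait(bp);

	/* Tear down the kernel mapping; this restores bp->b_data. */
	vunmapbuf(bp, todo);

	return error;
}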