diff --git a/kernel/copy.cpp b/kernel/copy.cpp
index 3f38b9d2..e3ae8e14 100644
--- a/kernel/copy.cpp
+++ b/kernel/copy.cpp
@@ -22,27 +22,116 @@
 *******************************************************************************/
 
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
 #include <string.h>
+#include <sortix/mman.h>
+
 #include <sortix/kernel/copy.h>
 #include <sortix/kernel/kernel.h>
+#include <sortix/kernel/kthread.h>
+#include <sortix/kernel/process.h>
+#include <sortix/kernel/segment.h>
 #include <sortix/kernel/string.h>
 
+// NOTE: The copy-from-user-space functions are specially made such that they
+//       guarantee the relevant memory exists and remains mapped during the
+//       copy operation. They do not protect against another thread
+//       concurrently modifying the memory; in that case they simply transfer
+//       whatever bytes they see rather than malfunction.
+
+// TODO: We could check the page tables for extra safety.
+
 namespace Sortix {
 
-// TODO: These are currently insecure, please check userspace tables before
-//       moving data to avoid security problems.
-
-bool CopyToUser(void* userdst, const void* ksrc, size_t count)
+static bool IsInProcessAddressSpace(Process* process)
 {
-	memcpy(userdst, ksrc, count);
-	return true;
+	addr_t current_address_space;
+#if defined(__i386__)
+	asm ( "mov %%cr3, %0" : "=r"(current_address_space) );
+#elif defined(__x86_64__)
+	asm ( "mov %%cr3, %0" : "=r"(current_address_space) );
+#else
+	#warning "You should set current_address_space for safety"
+	current_address_space = process->addrspace;
+#endif
+	return current_address_space == process->addrspace;
 }
 
-bool CopyFromUser(void* kdst, const void* usersrc, size_t count)
+static struct segment* FindSegment(Process* process, uintptr_t addr)
 {
-	memcpy(kdst, usersrc, count);
-	return true;
+	for ( size_t i = 0; i < process->segments_used; i++ )
+	{
+		struct segment* segment = &process->segments[i];
+		if ( addr < segment->addr )
+			continue;
+		if ( segment->addr + segment->size <= addr )
+			continue;
+		return segment;
+	}
+	return NULL;
+}
+
+bool CopyToUser(void* userdst_ptr, const void* ksrc_ptr, size_t count)
+{
+	uintptr_t userdst = (uintptr_t) userdst_ptr;
+	uintptr_t ksrc = (uintptr_t) ksrc_ptr;
+	bool result = true;
+	Process* process = CurrentProcess();
+	assert(IsInProcessAddressSpace(process));
+	kthread_mutex_lock(&process->segment_lock);
+	while ( count )
+	{
+		struct segment* segment = FindSegment(process, userdst);
+		if ( !segment || !(segment->prot & PROT_WRITE) )
+		{
+			errno = EFAULT;
+			result = false;
+			break;
+		}
+		size_t amount = count;
+		size_t segment_available = segment->addr + segment->size - userdst;
+		if ( segment_available < amount )
+			amount = segment_available;
+		memcpy((void*) userdst, (const void*) ksrc, amount);
+		userdst += amount;
+		ksrc += amount;
+		count -= amount;
+	}
+	kthread_mutex_unlock(&process->segment_lock);
+	return result;
+}
+
+bool CopyFromUser(void* kdst_ptr, const void* usersrc_ptr, size_t count)
+{
+	uintptr_t kdst = (uintptr_t) kdst_ptr;
+	uintptr_t usersrc = (uintptr_t) usersrc_ptr;
+	bool result = true;
+	Process* process = CurrentProcess();
+	assert(IsInProcessAddressSpace(process));
+	kthread_mutex_lock(&process->segment_lock);
+	while ( count )
+	{
+		struct segment* segment = FindSegment(process, usersrc);
+		if ( !segment || !(segment->prot & PROT_READ) )
+		{
+			errno = EFAULT;
+			result = false;
+			break;
+		}
+		size_t amount = count;
+		size_t segment_available = segment->addr + segment->size - usersrc;
+		if ( segment_available < amount )
+			amount = segment_available;
+		memcpy((void*) kdst, (const void*) usersrc, amount);
+		kdst += amount;
+		usersrc += amount;
+		count -= amount;
+	}
+	kthread_mutex_unlock(&process->segment_lock);
+	return result;
 }
 
 bool CopyToKernel(void* kdst, const void* ksrc, size_t count)
@@ -59,18 +148,93 @@ bool CopyFromKernel(void* kdst, const void* ksrc, size_t count)
 
 bool ZeroKernel(void* kdst, size_t count)
 {
+	// TODO: We could check the page tables for extra safety.
 	memset(kdst, 0, count);
 	return true;
 }
 
-bool ZeroUser(void* userdst, size_t count)
+bool ZeroUser(void* userdst_ptr, size_t count)
 {
-	return ZeroKernel(userdst, count);
+	uintptr_t userdst = (uintptr_t) userdst_ptr;
+	bool result = true;
+	Process* process = CurrentProcess();
+	assert(IsInProcessAddressSpace(process));
+	kthread_mutex_lock(&process->segment_lock);
+	while ( count )
+	{
+		struct segment* segment = FindSegment(process, userdst);
+		if ( !segment || !(segment->prot & PROT_WRITE) )
+		{
+			errno = EFAULT;
+			result = false;
+			break;
+		}
+		size_t amount = count;
+		size_t segment_available = segment->addr + segment->size - userdst;
+		if ( segment_available < amount )
+			amount = segment_available;
+		memset((void*) userdst, 0, amount);
+		userdst += amount;
+		count -= amount;
+	}
+	kthread_mutex_unlock(&process->segment_lock);
+	return result;
 }
 
-char* GetStringFromUser(const char* str)
+// NOTE: No overflow can happen here: the user cannot create an infinitely
+//       long string spanning the entire address space, because the user does
+//       not control the entire address space.
+char* GetStringFromUser(const char* usersrc_str)
 {
-	return String::Clone(str);
+	uintptr_t usersrc = (uintptr_t) usersrc_str;
+	size_t result_length = 0;
+	Process* process = CurrentProcess();
+	assert(IsInProcessAddressSpace(process));
+
+	kthread_mutex_lock(&process->segment_lock);
+	while ( true )
+	{
+		uintptr_t current_at = usersrc + result_length;
+		struct segment* segment = FindSegment(process, current_at);
+		if ( !segment || !(segment->prot & PROT_READ) )
+		{
+			kthread_mutex_unlock(&process->segment_lock);
+			return errno = EFAULT, (char*) NULL;
+		}
+		size_t segment_available = segment->addr + segment->size - current_at;
+		volatile const char* str = (volatile const char*) current_at;
+		size_t length = 0;
+		for ( ; length < segment_available; length++ )
+		{
+			char c = str[length];
+			if ( c == '\0' )
+				break;
+		}
+		result_length += length;
+		if ( length < segment_available )
+			break;
+	}
+
+	char* result = new char[result_length + 1];
+	if ( !result )
+	{
+		kthread_mutex_unlock(&process->segment_lock);
+		return (char*) NULL;
+	}
+
+	memcpy(result, (const char*) usersrc, result_length);
+	result[result_length] = '\0';
+
+	// We have transferred a bunch of bytes from user-space and appended a zero
+	// byte. This is a string. If no concurrent threads were modifying the
+	// memory, this is the intended string. If the memory was modified, we got
+	// potential garbage followed by a NUL byte. This is a string, but probably
+	// not what was intended. If the garbage itself had a premature unexpected
+	// NUL byte, that's okay; the garbage string just got truncated.
+
+	kthread_mutex_unlock(&process->segment_lock);
+	return result;
 }
 
 } // namespace Sortix
diff --git a/kernel/memorymanagement.cpp b/kernel/memorymanagement.cpp
index cdda2b59..295b0ca7 100644
--- a/kernel/memorymanagement.cpp
+++ b/kernel/memorymanagement.cpp
@@ -341,6 +341,12 @@ void* sys_mmap(void* addr_ptr, size_t size, int prot, int flags, int fd,
 	if ( !MapMemory(process, new_segment.addr, new_segment.size, new_segment.prot) )
 		return MAP_FAILED;
 
+	// The pread will copy to user-space right away, which requires this lock to be free.
+	// TODO: This means another thread can concurrently change this memory
+	//       mapping while the memory-mapped contents are being delivered,
+	//       resulting in an odd mix.
+	lock.Reset();
+
 	// Read the file contents into the newly allocated memory.
 	if ( !(flags & MAP_ANONYMOUS) )
 	{
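
Reviewer note, not part of the patch: a minimal sketch of how a system call would consume the hardened copy functions. The syscall and struct below (sys_example_info, struct example_info) are invented for illustration; only CopyToUser, GetStringFromUser, and the errno = EFAULT failure convention come from the patch itself.

// Hypothetical illustration only; assumes the usual kernel includes such as
// <stddef.h> and <sortix/kernel/copy.h>.
namespace Sortix {

struct example_info
{
	size_t version;
	size_t flags;
};

static int sys_example_info(struct example_info* user_info, const char* user_name)
{
	// GetStringFromUser returns a kernel copy allocated with new[], or NULL
	// with errno set (EFAULT for a bad or unreadable pointer).
	char* name = GetStringFromUser(user_name);
	if ( !name )
		return -1;
	struct example_info info;
	info.version = 1;
	info.flags = 0;
	// CopyToUser now validates the destination against the process's segment
	// list and fails with errno == EFAULT instead of writing through a bad
	// pointer, so the syscall can simply propagate the failure.
	bool ok = CopyToUser(user_info, &info, sizeof(info));
	delete[] name;
	return ok ? 0 : -1;
}

} // namespace Sortix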
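
Reviewer note, not part of the patch: the lock.Reset() in sys_mmap appears to be needed because the copy functions above acquire process->segment_lock themselves, so delivering the pread data to user-space while this thread already holds that lock would deadlock on a non-recursive mutex. That reading is an assumption based on the comment, as is the sketch below.

// Hypothetical sketch of the ordering constraint; ExampleDeliver is invented.
static void ExampleDeliver(Process* process, void* user_buffer,
                           const void* kernel_data, size_t size)
{
	kthread_mutex_lock(&process->segment_lock);
	// ... inspect or adjust the segment list while it cannot change ...
	kthread_mutex_unlock(&process->segment_lock); // must be released first
	// CopyToUser re-acquires segment_lock internally, so it has to run after
	// the unlock above, at the cost of the race window noted in the TODO.
	CopyToUser(user_buffer, kernel_data, size);
}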