
I am writing a system call to save the context of a process, i.e., the anonymous memory the process has dynamically allocated. If the argument to the system call (state) is 0, the process context (the dynamically allocated anonymous memory) is copied page by page from the process address space (walking mm->mmap) into kernel buffers, which are linked into a list_head field (context) added to the task_struct. Here is my implementation:

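For reference, the context field used below is a list_head added to struct task_struct. The actual patch isn't shown here, but it looks roughly like this sketch (the placement and initialization site are my description, not verified code):

/* Sketch: assumed change in include/linux/sched.h */
struct task_struct {
    /* ... existing fields ... */
    struct list_head context;   /* saved page_context entries */
};

/* Assumed init, once per task (e.g. in kernel/fork.c:copy_process()),
 * so the list_empty() check in the syscall works:
 *     INIT_LIST_HEAD(&p->context);
 */
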
struct page_context {
    unsigned long addr;
    unsigned char *buffer;
    struct list_head list;
};

/* file-scope page counter (its declaration was not shown in the original snippet) */
static unsigned long num_pages;

SYSCALL_DEFINE1(mmcontext, int, state)
{
    struct task_struct *task;
    unsigned long res;
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    unsigned long start_addr, end_addr, curr_addr;
    struct page_context *pctx;

    if (state == 0) {
        task = current;
        if (!list_empty(&task->context)) {
            printk(KERN_INFO "Process already has its context saved\n");
            return -EINVAL;
        }

        mm = task->mm;

        down_write(&mm->mmap_lock);

        // Traverse the VMA list and copy pages
        vma = mm->mmap;
        printk(KERN_INFO "PAGE_SIZE = %lu\n", PAGE_SIZE);
        while (vma) {
            if (vma->vm_flags & VM_WRITE) {
                start_addr = vma->vm_start;
                end_addr = vma->vm_end;
                curr_addr = start_addr;
                while (curr_addr < end_addr) {
                    pctx = kmalloc(sizeof(struct page_context), GFP_KERNEL);
                    if (!pctx) {
                        printk(KERN_ERR "Failed to allocate memory for page context\n");
                        up_write(&mm->mmap_lock);   /* don't return with the lock held */
                        return -ENOMEM;
                    }
                    printk(KERN_INFO "Allocated process context\n");
                    pctx->addr = curr_addr;
                    pctx->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
                    INIT_LIST_HEAD(&pctx->list);
                    if (!pctx->buffer) {
                        printk(KERN_ERR "Failed to allocate memory for page context buffer\n");
                        kfree(pctx);
                        up_write(&mm->mmap_lock);
                        return -ENOMEM;
                    }
                    printk(KERN_INFO "Before copy from userspace to kernel\n");
                    // memcpy((void *)pctx->buffer, (void *)curr_addr, PAGE_SIZE);
                    res = copy_from_user(pctx->buffer, (const void __user *)curr_addr, PAGE_SIZE);
                    if (res != 0)
                        printk(KERN_ERR "Failed to copy %lu bytes from userspace to kernel space\n", res);
                    printk(KERN_INFO "Copied memory from curr_addr to buffer\n");
                    list_add(&pctx->list, &task->context);
                    num_pages++;

                    curr_addr += PAGE_SIZE;
                }
            }
            vma = vma->vm_next;
        }
        up_write(&mm->mmap_lock);

        return 0;
    }

    return -EINVAL;   /* other states not handled yet */
}

The problem is that copy_from_user causes a kernel panic for some addresses in the VMAs. I also tried memcpy to copy the data from the process address space to the kernel buffer, but it panics as well, throwing a supervisor-mode read access exception.

I suspect this is because the memory chunk being copied is unaligned.

How can I fix this? Or is there another way to achieve this?
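
For reference, I exercise the syscall from a small user-space test like the one below (the syscall number 449 is a placeholder for whatever slot was used when wiring up the syscall table; adjust to your build):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_mmcontext 449   /* placeholder: whatever slot the table uses */

int main(void)
{
    char *p = malloc(4096);            /* some anonymous, writable memory */
    memset(p, 0xab, 4096);             /* touch it so the pages are populated */
    long ret = syscall(__NR_mmcontext, 0);
    printf("mmcontext(0) = %ld\n", ret);
    free(p);
    return 0;
}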

  • Possible related: https://stackoverflow.com/questions/8265657/how-does-copy-from-user-from-the-linux-kernel-work-internally – Barmar Apr 07 '23 at 22:44
    Just a guess ... Assuming your code is otherwise correct/bugfree, you're doing `down_write(&mm->mmap_lock);` and then `kmalloc` and `copy_from_user`. The locking may be an issue because when `copy_from_user` is called, the kernel _must_ be free to page fault [on the user address space], so paged out pages can be faulted in. The system may need to adjust mappings, and holding the semaphore across this may be problematic. – Craig Estey Apr 08 '23 at 02:16
  • You are always copying PAGE_SIZE regardless of end_addr. This seems like it can be a problem on the last page – stark Apr 09 '23 at 19:45
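
Following up on Craig Estey's locking point above: one way to avoid taking a page fault inside copy_from_user() while mmap_lock is held is to pin each page first with get_user_pages(), which is documented to be called with mmap_lock held, and then copy through a temporary kernel mapping. Below is an untested sketch of the inner copy, against a ~5.x kernel (the question's code walks mm->mmap/vm_next, which is pre-6.1; get_user_pages()'s signature and the kmap helpers vary between versions):

/* Sketch (untested): would replace the copy_from_user() call above.
 * The caller already holds mmap_lock, which get_user_pages() expects. */
struct page *page;
void *kaddr;
long pinned;

pinned = get_user_pages(curr_addr, 1, 0 /* read access */, &page, NULL);
if (pinned != 1) {
    /* e.g. an unpopulated or special mapping; skip this page */
    printk(KERN_ERR "get_user_pages failed at %lx (%ld)\n", curr_addr, pinned);
} else {
    kaddr = kmap_local_page(page);   /* kmap_atomic() on kernels < 5.11 */
    memcpy(pctx->buffer, kaddr, PAGE_SIZE);
    kunmap_local(kaddr);
    put_page(page);
}

Alternatively, copy_from_user() could be called with mmap_lock dropped and the VMA walk revalidated afterwards, but pinning keeps the walk simple.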

0 Answers