/*
 * Excerpt of the kernel's struct timer_list (see <linux/timer.h>), shown
 * for reference.  NOTE(review): the definition is truncated in this view —
 * the closing brace and any remaining fields are not visible here.
 */
struct timer_list {
	/*
	 * All fields that change during normal runtime grouped to the
	 * same cacheline
	 */
	struct hlist_node entry;			/* list linkage used by the timer core */
	unsigned long expires;				/* expiry time, in jiffies */
	void (*function)(struct timer_list *);		/* callback invoked on expiry */
	u32 flags;
	/* TODO 1: initialize timer */
	/* Bind timer_handler as the expiry callback; no special timer flags. */
	timer_setup(&timer, timer_handler, 0);

	/* TODO 1: schedule timer for the first time */
	/* First expiry TIMER_TIMEOUT seconds from now (HZ jiffies per second). */
	mod_timer(&timer, jiffies + TIMER_TIMEOUT * HZ);

	return 0;
}
/* Module teardown: stop the timer before its handler code is unloaded. */
static void __exit timer_exit(void)
{
	pr_info("[timer_exit] Exit module\n");

	/*
	 * TODO 1: cleanup; make sure the timer is not running after we exit.
	 * del_timer_sync() deactivates the timer and also waits for a
	 * handler that may currently be running on another CPU to finish.
	 */
	del_timer_sync(&timer);
}
/*
 * Per-device state; a single static instance (dev) backs the char device.
 */
static struct my_device_data {
	struct cdev cdev;		/* char device registration */
	/* TODO 1: add timer */
	struct timer_list timer;	/* handler recovers this struct via from_timer() */
	/* TODO 2: add flag */
	unsigned int flag;		/* TIMER_TYPE_* selecting the timer handler's action */
	/* TODO 3: add work */
	struct work_struct work;	/* deferred work scheduled from the timer handler */
	/* TODO 4: add list for monitored processes */
	struct list_head list;		/* list of struct mon_proc entries */
	/* TODO 4: add spinlock to protect list */
	spinlock_t lock;		/* protects 'list' (timer handler vs. ioctl path) */
} dev;
/*
 * Simulate a slow, blocking I/O operation by sleeping for 5 seconds.
 * Must only be called from process context (it sleeps) — which is why the
 * timer handler defers it to a workqueue instead of calling it directly.
 */
static void alloc_io(void)
{
	/*
	 * schedule_timeout_interruptible() is the idiomatic one-call form of
	 * set_current_state(TASK_INTERRUPTIBLE) + schedule_timeout().
	 */
	schedule_timeout_interruptible(5 * HZ);
	pr_info("Yawn! I've been sleeping for 5 seconds.\n");
}
rcu_read_lock(); task = pid_task(find_vpid(pid), PIDTYPE_PID); rcu_read_unlock(); if (!task) return ERR_PTR(-ESRCH);
p = kmalloc(sizeof(*p), GFP_ATOMIC); if (!p) return ERR_PTR(-ENOMEM);
get_task_struct(task); p->task = task;
return p; }
/* TODO 3: define work handler */ staticvoidwork_handler(struct work_struct *work) { alloc_io(); }
#define ALLOC_IO_DIRECT
/* TODO 3: undef ALLOC_IO_DIRECT */
/*
 * Left undefined so the timer handler defers alloc_io() to a workqueue
 * (schedule_work) instead of calling the sleeping function directly.
 */
#undef ALLOC_IO_DIRECT
static void timer_handler(struct timer_list *tl)
{
	/* TODO 1: implement timer handler */
	/* Recover the containing device structure from the timer pointer. */
	struct my_device_data *my_data = from_timer(my_data, tl, timer);

	/*
	 * 'current' here is whatever task the timer interrupted, not the
	 * task that armed the timer — the handler runs in atomic context.
	 */
	pr_info("[timer_handler] pid = %d, comm = %s\n",
		current->pid, current->comm);

	/* TODO 2: check flags: TIMER_TYPE_SET or TIMER_TYPE_ALLOC */
	switch (my_data->flag) {
	case TIMER_TYPE_SET:
		break;
	case TIMER_TYPE_ALLOC:
		// alloc_io();
		/* TODO 3: schedule work */
		/* alloc_io() sleeps, so defer it to process context. */
		schedule_work(&my_data->work);
		break;
	/* TODO 4: iterate the list and check the process state */
	case TIMER_TYPE_MON:
	{
		struct mon_proc *p, *n;

		/*
		 * Plain spin_lock() suffices here; the user-context side
		 * takes the same lock with spin_lock_bh() (see the ioctl).
		 */
		spin_lock(&my_data->lock);
		/* TODO 4: if task is dead print info ... */
		/* TODO 4: ... decrement task usage counter ... */
		/* TODO 4: ... remove it from the list ... */
		/* TODO 4: ... free the struct mon_proc */
		list_for_each_entry_safe(p, n, &my_data->list, list) {
			/* NOTE(review): task_struct 'state' was renamed
			 * '__state' in v5.14+; this targets an older kernel. */
			if (p->task->state == TASK_DEAD) {
				pr_info("task %s (%d) is dead\n",
					p->task->comm, p->task->pid);
				put_task_struct(p->task);
				list_del(&p->list);
				kfree(p);
			}
		}
		spin_unlock(&my_data->lock);
switch (cmd) { case MY_IOCTL_TIMER_SET: /* TODO 2: set flag */ my_data->flag = TIMER_TYPE_SET; /* TODO 1: schedule timer */ mod_timer(&dev.timer,jiffies + arg * HZ); break; case MY_IOCTL_TIMER_CANCEL: /* TODO 1: cancel timer */ del_timer_sync(&dev.timer); break; case MY_IOCTL_TIMER_ALLOC: /* TODO 2: set flag and schedule timer */ my_data->flag = TIMER_TYPE_ALLOC; mod_timer(&dev.timer,jiffies + arg * HZ); break; case MY_IOCTL_TIMER_MON: { /* TODO 4: use get_proc() and add task to list */ structmon_proc *p = get_proc(current->pid); /* TODO 4: protect access to list */ spin_lock_bh(&my_data->lock); list_add(&p->list,&my_data->list); spin_unlock_bh(&my_data->lock); /* TODO 4: set flag and schedule timer */ my_data->flag = TIMER_TYPE_MON; mod_timer(&my_data->timer,jiffies + arg * HZ); break; } default: return -ENOTTY; } return0; }
	/* TODO 1: Cleanup: make sure the timer is not running after exiting. */
	del_timer_sync(&dev.timer);

	/* TODO 3: Cleanup: make sure the work handler is not scheduled. */
	flush_scheduled_work();

	/* TODO 4: Cleanup the monitored process list */
	/*
	 * No lock taken here: the timer is cancelled and the workqueue
	 * drained above, so nothing else should touch the list.
	 * NOTE(review): this assumes the char device is already unregistered
	 * so no ioctl can race with unload — verify teardown ordering.
	 */
	list_for_each_entry_safe(p, n, &dev.list, list) {
		/* TODO 4: ... decrement task usage counter ... */
		/* TODO 4: ... remove it from the list ... */
		/* TODO 4: ... free the struct mon_proc */
		put_task_struct(p->task);
		list_del(&p->list);
		kfree(p);
	}
}
intmy_thread_f(void *data) { pr_info("[my_thread_f] Current process id is %d (%s)\n", current->pid, current->comm); /* TODO: Wait for command to remove module on wq_stop_thread queue. */ wait_event_interruptible(wq_stop_thread,atomic_read(&flag_stop_thread)!=0); /* TODO: set flag to mark kernel thread termination */ atomic_set(&flag_thread_terminated,1); /* TODO: notify the unload process that we have exited */ wake_up_interruptible(&wq_thread_terminated); pr_info("[my_thread_f] Exiting\n"); do_exit(0); }
/*
 * Module init: set up the handshake waitqueues/flags and start the
 * kernel thread.  Returns 0 on success, negative errno on failure.
 */
static int __init kthread_init(void)
{
	struct task_struct *kt;

	pr_info("[kthread_init] Init module\n");

	/* TODO: init the waitqueues and flags */
	init_waitqueue_head(&wq_stop_thread);
	init_waitqueue_head(&wq_thread_terminated);
	atomic_set(&flag_stop_thread, 0);
	atomic_set(&flag_thread_terminated, 0);

	/* TODO: create and start the kernel thread */
	/*
	 * kthread_run = kthread_create + wake_up_process, plus the error
	 * check the original omitted: kthread_create() returns an ERR_PTR
	 * on failure, and waking an error pointer would oops.
	 */
	kt = kthread_run(my_thread_f, NULL, "yhellow");
	if (IS_ERR(kt))
		return PTR_ERR(kt);

	return 0;
}
/*
 * Module exit: ask the kernel thread to stop and wait until it confirms.
 */
static void __exit kthread_exit(void)
{
	/* TODO: notify the kernel thread that it's time to exit */
	atomic_set(&flag_stop_thread, 1);
	wake_up_interruptible(&wq_stop_thread);

	/* TODO: wait for the kernel thread to exit */
	/*
	 * Uninterruptible wait: with wait_event_interruptible() (as in the
	 * original) a pending signal aborts the wait and the module unloads
	 * while the thread may still be running module code — a
	 * use-after-free.  Module exit must not return early here.
	 */
	wait_event(wq_thread_terminated,
		   atomic_read(&flag_thread_terminated) != 0);

	pr_info("[kthread_exit] Exit module\n");
}