Commits

Dimitris Leventeas  committed 7309e8d

Initial Release

  • Participants

Comments (0)

Files changed (24)

File 1.1/usr/src/kernel/proc.h

+#ifndef PROC_H
+#define PROC_H
+
+/* Here is the declaration of the process table.  It contains all process
+ * data, including registers, flags, scheduling priority, memory map, 
+ * accounting, message passing (IPC) information, and so on. 
+ *
+ * Many assembly code routines reference fields in it.  The offsets to these
+ * fields are defined in the assembler include file sconst.h.  When changing
+ * struct proc, be sure to change sconst.h to match.
+ */
+#include <minix/com.h>
+#include "protect.h"
+#include "const.h"
+#include "priv.h"
+ 
+struct proc {
+  /* NOTE(review): field offsets are mirrored in the assembler include file
+   * sconst.h (see header comment above); keep p_reg first and do not
+   * reorder or resize fields without updating sconst.h to match. */
+  struct stackframe_s p_reg;	/* process' registers saved in stack frame */
+
+#if (CHIP == INTEL)
+  reg_t p_ldt_sel;		/* selector in gdt with ldt base and limit */
+  struct segdesc_s p_ldt[2+NR_REMOTE_SEGS]; /* CS, DS and remote segments */
+#endif 
+
+#if (CHIP == M68000)
+/* M68000 specific registers and FPU details go here. */
+#endif 
+
+  proc_nr_t p_nr;		/* number of this process (for fast access) */
+  struct priv *p_priv;		/* system privileges structure */
+  short p_rts_flags;		/* process is runnable only if zero */
+  short p_misc_flags;		/* flags that do suspend the process */
+
+  /* NOTE(review): plain 'char' signedness is implementation-defined; these
+   * scheduling counters appear to be used as small signed values — confirm
+   * before doing arithmetic on them. */
+  char p_priority;		/* current scheduling priority */
+  char p_max_priority;		/* maximum scheduling priority */
+  char p_ticks_left;		/* number of scheduling ticks left */
+  char p_quantum_size;		/* quantum size in ticks */
+
+  struct mem_map p_memmap[NR_LOCAL_SEGS];   /* memory map (T, D, S) */
+
+  clock_t p_user_time;		/* user time in ticks */
+  clock_t p_sys_time;		/* sys time in ticks */
+
+  struct proc *p_nextready;	/* pointer to next ready process */
+  struct proc *p_caller_q;	/* head of list of procs wishing to send */
+  struct proc *p_q_link;	/* link to next proc wishing to send */
+  message *p_messbuf;		/* pointer to passed message buffer */
+  int p_getfrom_e;		/* from whom does process want to receive? */
+  int p_sendto_e;		/* to whom does process want to send? */
+
+  sigset_t p_pending;		/* bit map for pending kernel signals */
+
+  char p_name[P_NAME_LEN];	/* name of the process, including \0 */
+
+  int p_endpoint;		/* endpoint number, generation-aware */
+
+#if DEBUG_SCHED_CHECK
+  int p_ready, p_found;
+#endif
+};
+
+/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
+/* NOTE(review): these must all fit in 'short p_rts_flags'; highest bit in
+ * use is 0x200.  SLOT_FREE is compared with '==' in isemptyp() below, so a
+ * free slot must carry no other flag bits. */
+#define SLOT_FREE	0x01	/* process slot is free */
+#define NO_MAP		0x02	/* keeps unmapped forked child from running */
+#define SENDING		0x04	/* process blocked trying to send */
+#define RECEIVING	0x08	/* process blocked trying to receive */
+#define SIGNALED	0x10	/* set when new kernel signal arrives */
+#define SIG_PENDING	0x20	/* unready while signal being processed */
+#define P_STOP		0x40	/* set when process is being traced */
+#define NO_PRIV		0x80	/* keep forked system process from running */
+#define NO_PRIORITY    0x100	/* process has been stopped */
+#define NO_ENDPOINT    0x200	/* process cannot send or receive messages */
+
+/* Misc flags */
+#define REPLY_PENDING	0x01	/* reply to IPC_REQUEST is pending */
+#define MF_VM		0x08	/* process uses VM */
+
+/* Scheduling priorities for p_priority. Values must start at zero (highest
+ * priority) and increment.  Priorities of the processes in the boot image 
+ * can be set in table.c. IDLE must have a queue for itself, to prevent low 
+ * priority user processes to run round-robin with IDLE. 
+ */
+/* NOTE(review): invariants: IDLE_Q == NR_SCHED_QUEUES - 1, and
+ * MAX_USER_Q <= USER_Q <= MIN_USER_Q.  They hold for the values below. */
+#define NR_SCHED_QUEUES   32	/* MUST equal minimum priority + 1 */
+#define TASK_Q		   0	/* highest, used for kernel tasks */
+#define MAX_USER_Q  	   0    /* highest priority for user processes */   
+#define USER_Q  	  15    /* default (should correspond to nice 0) */   
+#define MIN_USER_Q	  30	/* minimum priority for user processes */
+#define IDLE_Q		  31    /* lowest, only IDLE process goes here */
+
+/* Magic process table addresses. */
+#define BEG_PROC_ADDR (&proc[0])
+#define BEG_USER_ADDR (&proc[NR_TASKS])
+#define END_PROC_ADDR (&proc[NR_TASKS + NR_PROCS])
+
+#define NIL_PROC          ((struct proc *) 0)
+/* NOTE(review): sentinel made from the integer 1 — implementation-defined
+ * conversion; presumably never dereferenced, verify at call sites. */
+#define NIL_SYS_PROC      ((struct proc *) 1)
+/* Map a process number (negative for kernel tasks, >= 0 for user procs,
+ * see iskerneln/isusern below) to its table slot.  The whole expansion is
+ * parenthesized so the macros compose safely in any expression context
+ * (e.g. &proc_addr(n), proc_addr(n)->p_nr). */
+#define cproc_addr(n)     (&(proc + NR_TASKS)[(n)])
+#define proc_addr(n)      ((pproc_addr + NR_TASKS)[(n)])
+#define proc_nr(p) 	  ((p)->p_nr)
+
+/* Range and slot-state predicates on process numbers / pointers. */
+#define isokprocn(n)      ((unsigned) ((n) + NR_TASKS) < NR_PROCS + NR_TASKS)
+#define isemptyn(n)       isemptyp(proc_addr(n))
+#define isemptyp(p)       ((p)->p_rts_flags == SLOT_FREE)
+#define iskernelp(p)	  iskerneln((p)->p_nr)
+#define iskerneln(n)	  ((n) < 0)
+#define isuserp(p)        isusern((p)->p_nr)
+#define isusern(n)        ((n) >= 0)
+
+/* The process table and pointers to process table slots. The pointers allow
+ * faster access because now a process entry can be found by indexing the
+ * pproc_addr array, while accessing an element i requires a multiplication
+ * with sizeof(struct proc) to determine the address. 
+ */
+EXTERN struct proc proc[NR_TASKS + NR_PROCS];	/* process table */
+EXTERN struct proc *pproc_addr[NR_TASKS + NR_PROCS];
+EXTERN struct proc *rdy_head[NR_SCHED_QUEUES]; /* ptrs to ready list headers */
+EXTERN struct proc *rdy_tail[NR_SCHED_QUEUES]; /* ptrs to ready list tails */
+
+#endif /* PROC_H */

File 1.2/usr/src/kernel/proc.h

+#ifndef PROC_H
+#define PROC_H
+
+/* Here is the declaration of the process table.  It contains all process
+ * data, including registers, flags, scheduling priority, memory map, 
+ * accounting, message passing (IPC) information, and so on. 
+ *
+ * Many assembly code routines reference fields in it.  The offsets to these
+ * fields are defined in the assembler include file sconst.h.  When changing
+ * struct proc, be sure to change sconst.h to match.
+ */
+#include <minix/com.h>
+#include "protect.h"
+#include "const.h"
+#include "priv.h"
+ 
+struct proc {
+  /* NOTE(review): field offsets are mirrored in the assembler include file
+   * sconst.h (see header comment above); keep p_reg first and do not
+   * reorder or resize fields without updating sconst.h to match. */
+  struct stackframe_s p_reg;	/* process' registers saved in stack frame */
+
+#if (CHIP == INTEL)
+  reg_t p_ldt_sel;		/* selector in gdt with ldt base and limit */
+  struct segdesc_s p_ldt[2+NR_REMOTE_SEGS]; /* CS, DS and remote segments */
+#endif 
+
+#if (CHIP == M68000)
+/* M68000 specific registers and FPU details go here. */
+#endif 
+
+  proc_nr_t p_nr;		/* number of this process (for fast access) */
+  struct priv *p_priv;		/* system privileges structure */
+  short p_rts_flags;		/* process is runnable only if zero */
+  short p_misc_flags;		/* flags that do suspend the process */
+
+  /* NOTE(review): plain 'char' signedness is implementation-defined; these
+   * scheduling counters appear to be used as small signed values — confirm
+   * before doing arithmetic on them. */
+  char p_priority;		/* current scheduling priority */
+  char p_max_priority;		/* maximum scheduling priority */
+  char p_ticks_left;		/* number of scheduling ticks left */
+  char p_quantum_size;		/* quantum size in ticks */
+
+  struct mem_map p_memmap[NR_LOCAL_SEGS];   /* memory map (T, D, S) */
+
+  clock_t p_user_time;		/* user time in ticks */
+  clock_t p_sys_time;		/* sys time in ticks */
+
+  struct proc *p_nextready;	/* pointer to next ready process */
+  struct proc *p_caller_q;	/* head of list of procs wishing to send */
+  struct proc *p_q_link;	/* link to next proc wishing to send */
+  message *p_messbuf;		/* pointer to passed message buffer */
+  int p_getfrom_e;		/* from whom does process want to receive? */
+  int p_sendto_e;		/* to whom does process want to send? */
+
+  sigset_t p_pending;		/* bit map for pending kernel signals */
+
+  char p_name[P_NAME_LEN];	/* name of the process, including \0 */
+
+  int p_endpoint;		/* endpoint number, generation-aware */
+
+#if DEBUG_SCHED_CHECK
+  int p_ready, p_found;
+#endif
+};
+
+/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
+/* NOTE(review): these must all fit in 'short p_rts_flags'; highest bit in
+ * use is 0x200.  SLOT_FREE is compared with '==' in isemptyp() below, so a
+ * free slot must carry no other flag bits. */
+#define SLOT_FREE	0x01	/* process slot is free */
+#define NO_MAP		0x02	/* keeps unmapped forked child from running */
+#define SENDING		0x04	/* process blocked trying to send */
+#define RECEIVING	0x08	/* process blocked trying to receive */
+#define SIGNALED	0x10	/* set when new kernel signal arrives */
+#define SIG_PENDING	0x20	/* unready while signal being processed */
+#define P_STOP		0x40	/* set when process is being traced */
+#define NO_PRIV		0x80	/* keep forked system process from running */
+#define NO_PRIORITY    0x100	/* process has been stopped */
+#define NO_ENDPOINT    0x200	/* process cannot send or receive messages */
+
+/* Misc flags */
+#define REPLY_PENDING	0x01	/* reply to IPC_REQUEST is pending */
+#define MF_VM		0x08	/* process uses VM */
+
+/* Scheduling priorities for p_priority. Values must start at zero (highest
+ * priority) and increment.  Priorities of the processes in the boot image 
+ * can be set in table.c. IDLE must have a queue for itself, to prevent low 
+ * priority user processes to run round-robin with IDLE. 
+ */
+/* NOTE(review): invariants: IDLE_Q == NR_SCHED_QUEUES - 1, and
+ * MAX_USER_Q <= USER_Q <= MIN_USER_Q.  They hold for the values below. */
+#define NR_SCHED_QUEUES    8	/* MUST equal minimum priority + 1 */
+#define TASK_Q		   0	/* highest, used for kernel tasks */
+#define MAX_USER_Q  	   0    /* highest priority for user processes */   
+#define USER_Q  	   5    /* default (should correspond to nice 0) */   
+#define MIN_USER_Q	   6	/* minimum priority for user processes */
+#define IDLE_Q		   7    /* lowest, only IDLE process goes here */
+
+/* Magic process table addresses. */
+#define BEG_PROC_ADDR (&proc[0])
+#define BEG_USER_ADDR (&proc[NR_TASKS])
+#define END_PROC_ADDR (&proc[NR_TASKS + NR_PROCS])
+
+#define NIL_PROC          ((struct proc *) 0)
+/* NOTE(review): sentinel made from the integer 1 — implementation-defined
+ * conversion; presumably never dereferenced, verify at call sites. */
+#define NIL_SYS_PROC      ((struct proc *) 1)
+/* Map a process number (negative for kernel tasks, >= 0 for user procs,
+ * see iskerneln/isusern below) to its table slot.  The whole expansion is
+ * parenthesized so the macros compose safely in any expression context
+ * (e.g. &proc_addr(n), proc_addr(n)->p_nr). */
+#define cproc_addr(n)     (&(proc + NR_TASKS)[(n)])
+#define proc_addr(n)      ((pproc_addr + NR_TASKS)[(n)])
+#define proc_nr(p) 	  ((p)->p_nr)
+
+/* Range and slot-state predicates on process numbers / pointers. */
+#define isokprocn(n)      ((unsigned) ((n) + NR_TASKS) < NR_PROCS + NR_TASKS)
+#define isemptyn(n)       isemptyp(proc_addr(n))
+#define isemptyp(p)       ((p)->p_rts_flags == SLOT_FREE)
+#define iskernelp(p)	  iskerneln((p)->p_nr)
+#define iskerneln(n)	  ((n) < 0)
+#define isuserp(p)        isusern((p)->p_nr)
+#define isusern(n)        ((n) >= 0)
+
+/* The process table and pointers to process table slots. The pointers allow
+ * faster access because now a process entry can be found by indexing the
+ * pproc_addr array, while accessing an element i requires a multiplication
+ * with sizeof(struct proc) to determine the address. 
+ */
+EXTERN struct proc proc[NR_TASKS + NR_PROCS];	/* process table */
+EXTERN struct proc *pproc_addr[NR_TASKS + NR_PROCS];
+EXTERN struct proc *rdy_head[NR_SCHED_QUEUES]; /* ptrs to ready list headers */
+EXTERN struct proc *rdy_tail[NR_SCHED_QUEUES]; /* ptrs to ready list tails */
+
+#endif /* PROC_H */

File 1.3/usr/src/kernel/proc.h

+#ifndef PROC_H
+#define PROC_H
+
+/* Here is the declaration of the process table.  It contains all process
+ * data, including registers, flags, scheduling priority, memory map, 
+ * accounting, message passing (IPC) information, and so on. 
+ *
+ * Many assembly code routines reference fields in it.  The offsets to these
+ * fields are defined in the assembler include file sconst.h.  When changing
+ * struct proc, be sure to change sconst.h to match.
+ */
+#include <minix/com.h>
+#include "protect.h"
+#include "const.h"
+#include "priv.h"
+ 
+struct proc {
+  /* NOTE(review): field offsets are mirrored in the assembler include file
+   * sconst.h (see header comment above); keep p_reg first and do not
+   * reorder or resize fields without updating sconst.h to match. */
+  struct stackframe_s p_reg;	/* process' registers saved in stack frame */
+
+#if (CHIP == INTEL)
+  reg_t p_ldt_sel;		/* selector in gdt with ldt base and limit */
+  struct segdesc_s p_ldt[2+NR_REMOTE_SEGS]; /* CS, DS and remote segments */
+#endif 
+
+#if (CHIP == M68000)
+/* M68000 specific registers and FPU details go here. */
+#endif 
+
+  proc_nr_t p_nr;		/* number of this process (for fast access) */
+  struct priv *p_priv;		/* system privileges structure */
+  short p_rts_flags;		/* process is runnable only if zero */
+  short p_misc_flags;		/* flags that do suspend the process */
+
+  /* NOTE(review): plain 'char' signedness is implementation-defined; these
+   * scheduling counters appear to be used as small signed values — confirm
+   * before doing arithmetic on them. */
+  char p_priority;		/* current scheduling priority */
+  char p_max_priority;		/* maximum scheduling priority */
+  char p_ticks_left;		/* number of scheduling ticks left */
+  char p_quantum_size;		/* quantum size in ticks */
+
+  struct mem_map p_memmap[NR_LOCAL_SEGS];   /* memory map (T, D, S) */
+
+  clock_t p_user_time;		/* user time in ticks */
+  clock_t p_sys_time;		/* sys time in ticks */
+
+  struct proc *p_nextready;	/* pointer to next ready process */
+  struct proc *p_caller_q;	/* head of list of procs wishing to send */
+  struct proc *p_q_link;	/* link to next proc wishing to send */
+  message *p_messbuf;		/* pointer to passed message buffer */
+  int p_getfrom_e;		/* from whom does process want to receive? */
+  int p_sendto_e;		/* to whom does process want to send? */
+
+  sigset_t p_pending;		/* bit map for pending kernel signals */
+
+  char p_name[P_NAME_LEN];	/* name of the process, including \0 */
+
+  int p_endpoint;		/* endpoint number, generation-aware */
+
+#if DEBUG_SCHED_CHECK
+  int p_ready, p_found;
+#endif
+};
+
+/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
+/* NOTE(review): these must all fit in 'short p_rts_flags'; highest bit in
+ * use is 0x200.  SLOT_FREE is compared with '==' in isemptyp() below, so a
+ * free slot must carry no other flag bits. */
+#define SLOT_FREE	0x01	/* process slot is free */
+#define NO_MAP		0x02	/* keeps unmapped forked child from running */
+#define SENDING		0x04	/* process blocked trying to send */
+#define RECEIVING	0x08	/* process blocked trying to receive */
+#define SIGNALED	0x10	/* set when new kernel signal arrives */
+#define SIG_PENDING	0x20	/* unready while signal being processed */
+#define P_STOP		0x40	/* set when process is being traced */
+#define NO_PRIV		0x80	/* keep forked system process from running */
+#define NO_PRIORITY    0x100	/* process has been stopped */
+#define NO_ENDPOINT    0x200	/* process cannot send or receive messages */
+
+/* Misc flags */
+#define REPLY_PENDING	0x01	/* reply to IPC_REQUEST is pending */
+#define MF_VM		0x08	/* process uses VM */
+
+/* Scheduling priorities for p_priority. Values must start at zero (highest
+ * priority) and increment.  Priorities of the processes in the boot image 
+ * can be set in table.c. IDLE must have a queue for itself, to prevent low 
+ * priority user processes to run round-robin with IDLE. 
+ */
+/* NOTE(review): this revision collapses the scheduler to two queues: all
+ * tasks, servers and user processes share queue 0, IDLE alone is in 1.
+ * Invariant IDLE_Q == NR_SCHED_QUEUES - 1 still holds. */
+#define NR_SCHED_QUEUES    2	/* MUST equal minimum priority + 1 */
+#define TASK_Q		   0	/* highest, used for kernel tasks */
+#define MAX_USER_Q  	   0    /* highest priority for user processes */   
+#define USER_Q  	   0    /* default (should correspond to nice 0) */   
+#define MIN_USER_Q	   0	/* minimum priority for user processes */
+#define IDLE_Q		   1    /* lowest, only IDLE process goes here */
+
+/* Magic process table addresses. */
+#define BEG_PROC_ADDR (&proc[0])
+#define BEG_USER_ADDR (&proc[NR_TASKS])
+#define END_PROC_ADDR (&proc[NR_TASKS + NR_PROCS])
+
+#define NIL_PROC          ((struct proc *) 0)
+/* NOTE(review): sentinel made from the integer 1 — implementation-defined
+ * conversion; presumably never dereferenced, verify at call sites. */
+#define NIL_SYS_PROC      ((struct proc *) 1)
+/* Map a process number (negative for kernel tasks, >= 0 for user procs,
+ * see iskerneln/isusern below) to its table slot.  The whole expansion is
+ * parenthesized so the macros compose safely in any expression context
+ * (e.g. &proc_addr(n), proc_addr(n)->p_nr). */
+#define cproc_addr(n)     (&(proc + NR_TASKS)[(n)])
+#define proc_addr(n)      ((pproc_addr + NR_TASKS)[(n)])
+#define proc_nr(p) 	  ((p)->p_nr)
+
+/* Range and slot-state predicates on process numbers / pointers. */
+#define isokprocn(n)      ((unsigned) ((n) + NR_TASKS) < NR_PROCS + NR_TASKS)
+#define isemptyn(n)       isemptyp(proc_addr(n))
+#define isemptyp(p)       ((p)->p_rts_flags == SLOT_FREE)
+#define iskernelp(p)	  iskerneln((p)->p_nr)
+#define iskerneln(n)	  ((n) < 0)
+#define isuserp(p)        isusern((p)->p_nr)
+#define isusern(n)        ((n) >= 0)
+
+/* The process table and pointers to process table slots. The pointers allow
+ * faster access because now a process entry can be found by indexing the
+ * pproc_addr array, while accessing an element i requires a multiplication
+ * with sizeof(struct proc) to determine the address. 
+ */
+EXTERN struct proc proc[NR_TASKS + NR_PROCS];	/* process table */
+EXTERN struct proc *pproc_addr[NR_TASKS + NR_PROCS];
+EXTERN struct proc *rdy_head[NR_SCHED_QUEUES]; /* ptrs to ready list headers */
+EXTERN struct proc *rdy_tail[NR_SCHED_QUEUES]; /* ptrs to ready list tails */
+
+#endif /* PROC_H */

File 1.3/usr/src/kernel/table.c

+/* The object file of "table.c" contains most kernel data. Variables that 
+ * are declared in the *.h files appear with EXTERN in front of them, as in
+ *
+ *    EXTERN int x;
+ *
+ * Normally EXTERN is defined as extern, so when they are included in another
+ * file, no storage is allocated.  If EXTERN were not present, but just say,
+ *
+ *    int x;
+ *
+ * then including this file in several source files would cause 'x' to be
+ * declared several times.  While some linkers accept this, others do not,
+ * so they are declared extern when included normally.  However, it must be
+ * declared for real somewhere.  That is done here, by redefining EXTERN as
+ * the null string, so that inclusion of all *.h files in table.c actually
+ * generates storage for them.  
+ *
+ * Various variables could not be declared EXTERN, but are declared PUBLIC
+ * or PRIVATE. The reason for this is that extern variables cannot have a  
+ * default initialization. If such variables are shared, they must also be
+ * declared in one of the *.h files without the initialization.  Examples 
+ * include 'boot_image' (this file) and 'idt' and 'gdt' (protect.c). 
+ *
+ * Changes:
+ *    Aug 02, 2005   set privileges and minimal boot image (Jorrit N. Herder)
+ *    Oct 17, 2004   updated above and tasktab comments  (Jorrit N. Herder)
+ *    May 01, 2004   changed struct for system image  (Jorrit N. Herder)
+ */
+#define _TABLE
+
+#include "kernel.h"
+#include "proc.h"
+#include "ipc.h"
+#include <minix/com.h>
+#include <ibm/int86.h>
+
+/* Define stack sizes for the kernel tasks included in the system image. */
+#define NO_STACK	0
+#define SMALL_STACK	(128 * sizeof(char *))
+#define IDL_S	SMALL_STACK	/* 3 intr, 3 temps, 4 db for Intel */
+#define	HRD_S	NO_STACK	/* dummy task, uses kernel stack */
+#define	TSK_S	SMALL_STACK	/* system and clock task */
+
+/* Stack space for all the task stacks.  Declared as (char *) to align it. */
+/* NOTE(review): sized in units of sizeof(char *): one IDL_S stack, one
+ * HRD_S (zero-size) stack, and TSK_S stacks for the clock and system
+ * tasks — must match the kernel-task entries in image[] below. */
+#define	TOT_STACK_SPACE	(IDL_S + HRD_S + (2 * TSK_S))
+PUBLIC char *t_stack[TOT_STACK_SPACE / sizeof(char *)];
+	
+/* Define flags for the various process types. */
+#define IDL_F 	(SYS_PROC | PREEMPTIBLE | BILLABLE)	/* idle task */
+#define TSK_F 	(SYS_PROC)				/* kernel tasks */
+#define SRV_F 	(SYS_PROC | PREEMPTIBLE)		/* system services */
+#define USR_F	(BILLABLE | PREEMPTIBLE)		/* user processes */
+
+/* Define system call traps for the various process types. These call masks
+ * determine what system call traps a process is allowed to make.
+ */
+#define TSK_T	(1 << RECEIVE)			/* clock and system */
+#define SRV_T	(~0)				/* system services */
+#define USR_T   ((1 << SENDREC) | (1 << ECHO))	/* user processes */
+
+/* Send masks determine to whom processes can send messages or notifications. 
+ * The values here are used for the processes in the boot image. We rely on 
+ * the initialization code in main() to match the s_nr_to_id() mapping for the
+ * processes in the boot image, so that the send mask that is defined here 
+ * can be directly copied onto map[0] of the actual send mask. Privilege
+ * structure 0 is shared by user processes. 
+ */
+/* NOTE(review): the one-letter helper macros s() and c() below are never
+ * #undef'd after use; beware name collisions if this file grows. */
+#define s(n)		(1 << s_nr_to_id(n))
+#define SRV_M	(~0)
+#define SYS_M	(~0)
+#define USR_M (s(PM_PROC_NR) | s(FS_PROC_NR) | s(RS_PROC_NR) | s(SYSTEM))
+/* NOTE(review): s(SYSTEM) already appears in USR_M; repeating it in DRV_M
+ * is redundant but harmless. */
+#define DRV_M (USR_M | s(SYSTEM) | s(CLOCK) | s(DS_PROC_NR) | s(LOG_PROC_NR) | s(TTY_PROC_NR))
+
+/* Define kernel calls that processes are allowed to make. This is not looking
+ * very nice, but we need to define the access rights on a per call basis. 
+ * Note that the reincarnation server has all bits on, because it should
+ * be allowed to distribute rights to services that it starts. 
+ */
+#define c(n)	(1 << ((n)-KERNEL_CALL))
+#define RS_C	~0	
+#define DS_C	~0	
+#define PM_C	~(c(SYS_DEVIO) | c(SYS_SDEVIO) | c(SYS_VDEVIO) | c(SYS_IRQCTL) | c(SYS_INT86))
+#define FS_C	(c(SYS_KILL) | c(SYS_VIRCOPY) | c(SYS_VIRVCOPY) | c(SYS_UMAP) | c(SYS_GETINFO) | c(SYS_EXIT) | c(SYS_TIMES) | c(SYS_SETALARM))
+#define DRV_C (FS_C | c(SYS_SEGCTL) | c(SYS_IRQCTL) | c(SYS_INT86) | c(SYS_DEVIO) | c(SYS_SDEVIO) | c(SYS_VDEVIO))
+#define TTY_C (DRV_C | c(SYS_ABORT) | c(SYS_VM_MAP) | c(SYS_IOPENABLE))
+#define MEM_C	(DRV_C | c(SYS_PHYSCOPY) | c(SYS_PHYSVCOPY) | c(SYS_VM_MAP) | \
+	c(SYS_IOPENABLE))
+
+/* The system image table lists all programs that are part of the boot image. 
+ * The order of the entries here MUST agree with the order of the programs
+ * in the boot image and all kernel tasks must come first.
+ *
+ * Each entry provides the process number, flags, quantum size, scheduling
+ * queue, allowed traps, ipc mask, and a name for the process table. The 
+ * initial program counter and stack size is also provided for kernel tasks.
+ *
+ * Note: the quantum size must be positive in all cases! 
+ */
+/* NOTE(review): every 'queue' value must be a valid queue number
+ * (< NR_SCHED_QUEUES; 2 in the proc.h of this revision).  The server
+ * entries use literal 0, i.e. TASK_Q — presumably intended in the
+ * two-queue configuration; verify against the scheduler. */
+PUBLIC struct boot_image image[] = {
+/* process nr,   pc, flags, qs,  queue, stack, traps, ipcto, call,  name */ 
+ { IDLE,  idle_task, IDL_F,  8, IDLE_Q, IDL_S,     0,     0,     0, "idle"  },
+ { CLOCK,clock_task, TSK_F,  8, TASK_Q, TSK_S, TSK_T,     0,     0, "clock" },
+ { SYSTEM, sys_task, TSK_F,  8, TASK_Q, TSK_S, TSK_T,     0,     0, "system"},
+ { HARDWARE,      0, TSK_F,  8, TASK_Q, HRD_S,     0,     0,     0, "kernel"},
+ { PM_PROC_NR,    0, SRV_F, 32,      0, 0,     SRV_T, SRV_M,  PM_C, "pm"    },
+ { FS_PROC_NR,    0, SRV_F, 32,      0, 0,     SRV_T, SRV_M,  FS_C, "fs"    },
+ { RS_PROC_NR,    0, SRV_F,  4,      0, 0,     SRV_T, SYS_M,  RS_C, "rs"    },
+ { DS_PROC_NR,    0, SRV_F,  4,      0, 0,     SRV_T, SYS_M,  DS_C, "ds"    },
+ { TTY_PROC_NR,   0, SRV_F,  4,      0, 0,     SRV_T, SYS_M, TTY_C, "tty"   },
+ { MEM_PROC_NR,   0, SRV_F,  4,      0, 0,     SRV_T, SYS_M, MEM_C, "mem"   },
+ { LOG_PROC_NR,   0, SRV_F,  4,      0, 0,     SRV_T, SYS_M, DRV_C, "log"   },
+ { INIT_PROC_NR,  0, USR_F,  8, USER_Q, 0,     USR_T, USR_M,     0, "init"  },
+};
+
+/* Verify the size of the system image table at compile time. Also verify that 
+ * the first chunk of the ipc mask has enough bits to accommodate the processes
+ * in the image.  
+ * If a problem is detected, the size of the 'dummy' array will be negative, 
+ * causing a compile time error. Note that no space is actually allocated 
+ * because 'dummy' is declared extern.
+ */
+extern int dummy[(NR_BOOT_PROCS==sizeof(image)/
+	sizeof(struct boot_image))?1:-1];
+extern int dummy[(BITCHUNK_BITS > NR_BOOT_PROCS - 1) ? 1 : -1];
+

File 2.tar.gz

Binary file added.

File 2/usr/src/include/minix/syslib.h

+/* Prototypes for system library functions. */
+
+#ifndef _SYSLIB_H
+#define _SYSLIB_H
+
+#ifndef _TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifndef _IPC_H
+#include <minix/ipc.h>
+#endif
+
+#ifndef _DEVIO_H
+#include <minix/devio.h>
+#endif
+
+/* Forward declaration */
+struct reg86u;
+
+#define SYSTASK SYSTEM
+
+/*==========================================================================* 
+ * Minix system library. 						    *
+ *==========================================================================*/ 
+_PROTOTYPE( int _taskcall, (int who, int syscallnr, message *msgptr));
+
+_PROTOTYPE( int sys_abort, (int how, ...));
+_PROTOTYPE( int sys_enable_iop, (int proc));
+_PROTOTYPE( int sys_exec, (int proc, char *ptr,  
+				char *aout, vir_bytes initpc));
+/* We added in the sys_fork as fourth argument an integer which represents
+ * the uid */
+_PROTOTYPE( int sys_fork, (int parent, int child, int *, int realuid));
+_PROTOTYPE( int sys_newmap, (int proc, struct mem_map *ptr));
+_PROTOTYPE( int sys_exit, (int proc));
+_PROTOTYPE( int sys_trace, (int req, int proc, long addr, long *data_p));
+
+_PROTOTYPE( int sys_privctl, (int proc, int req, int i, void *p));
+_PROTOTYPE( int sys_nice, (int proc, int priority));
+
+_PROTOTYPE( int sys_int86, (struct reg86u *reg86p));
+_PROTOTYPE( int sys_vm_setbuf, (phys_bytes base, phys_bytes size,
+							phys_bytes high));
+_PROTOTYPE( int sys_vm_map, (int proc_nr, int do_map,
+	phys_bytes base, phys_bytes size, phys_bytes offset));
+
+/* Shorthands for sys_sdevio() system call. */
+#define sys_insb(port, proc_nr, buffer, count) \
+	sys_sdevio(DIO_INPUT, port, DIO_BYTE, proc_nr, buffer, count)
+#define sys_insw(port, proc_nr, buffer, count) \
+	sys_sdevio(DIO_INPUT, port, DIO_WORD, proc_nr, buffer, count)
+#define sys_outsb(port, proc_nr, buffer, count) \
+	sys_sdevio(DIO_OUTPUT, port, DIO_BYTE, proc_nr, buffer, count)
+#define sys_outsw(port, proc_nr, buffer, count) \
+	sys_sdevio(DIO_OUTPUT, port, DIO_WORD, proc_nr, buffer, count)
+_PROTOTYPE( int sys_sdevio, (int req, long port, int type, int proc_nr,
+	void *buffer, int count));
+
+/* Clock functionality: get system times or (un)schedule an alarm call. */
+_PROTOTYPE( int sys_times, (int proc_nr, clock_t *ptr));
+_PROTOTYPE(int sys_setalarm, (clock_t exp_time, int abs_time));
+
+/* Shorthands for sys_irqctl() system call. */
+#define sys_irqdisable(hook_id) \
+    sys_irqctl(IRQ_DISABLE, 0, 0, hook_id) 
+#define sys_irqenable(hook_id) \
+    sys_irqctl(IRQ_ENABLE, 0, 0, hook_id) 
+#define sys_irqsetpolicy(irq_vec, policy, hook_id) \
+    sys_irqctl(IRQ_SETPOLICY, irq_vec, policy, hook_id)
+#define sys_irqrmpolicy(irq_vec, hook_id) \
+    sys_irqctl(IRQ_RMPOLICY, irq_vec, 0, hook_id)
+_PROTOTYPE ( int sys_irqctl, (int request, int irq_vec, int policy,
+    int *irq_hook_id) );
+
+/* Shorthands for sys_vircopy() and sys_physcopy() system calls. */
+#define sys_biosin(bios_vir, dst_vir, bytes) \
+	sys_vircopy(SELF, BIOS_SEG, bios_vir, SELF, D, dst_vir, bytes)
+#define sys_biosout(src_vir, bios_vir, bytes) \
+	sys_vircopy(SELF, D, src_vir, SELF, BIOS_SEG, bios_vir, bytes)
+#define sys_datacopy(src_proc, src_vir, dst_proc, dst_vir, bytes) \
+	sys_vircopy(src_proc, D, src_vir, dst_proc, D, dst_vir, bytes)
+#define sys_textcopy(src_proc, src_vir, dst_proc, dst_vir, bytes) \
+	sys_vircopy(src_proc, T, src_vir, dst_proc, T, dst_vir, bytes)
+#define sys_stackcopy(src_proc, src_vir, dst_proc, dst_vir, bytes) \
+	sys_vircopy(src_proc, S, src_vir, dst_proc, S, dst_vir, bytes)
+_PROTOTYPE(int sys_vircopy, (int src_proc, int src_seg, vir_bytes src_vir,
+	int dst_proc, int dst_seg, vir_bytes dst_vir, phys_bytes bytes));
+
+#define sys_abscopy(src_phys, dst_phys, bytes) \
+	sys_physcopy(NONE, PHYS_SEG, src_phys, NONE, PHYS_SEG, dst_phys, bytes)
+_PROTOTYPE(int sys_physcopy, (int src_proc, int src_seg, vir_bytes src_vir,
+	int dst_proc, int dst_seg, vir_bytes dst_vir, phys_bytes bytes));
+_PROTOTYPE(int sys_memset, (unsigned long pattern, 
+		phys_bytes base, phys_bytes bytes));
+
+/* Vectored virtual / physical copy calls. */
+#if DEAD_CODE		/* library part not yet implemented */
+_PROTOTYPE(int sys_virvcopy, (phys_cp_req *vec_ptr,int vec_size,int *nr_ok));
+_PROTOTYPE(int sys_physvcopy, (phys_cp_req *vec_ptr,int vec_size,int *nr_ok));
+#endif
+
+_PROTOTYPE(int sys_umap, (int proc_nr, int seg, vir_bytes vir_addr,
+	 vir_bytes bytes, phys_bytes *phys_addr));
+_PROTOTYPE(int sys_segctl, (int *index, u16_t *seg, vir_bytes *off,
+	phys_bytes phys, vir_bytes size));
+
+/* Shorthands for sys_getinfo() system call. */
+#define sys_getkmessages(dst)	sys_getinfo(GET_KMESSAGES, dst, 0,0,0)
+#define sys_getkinfo(dst)	sys_getinfo(GET_KINFO, dst, 0,0,0)
+#define sys_getloadinfo(dst)	sys_getinfo(GET_LOADINFO, dst, 0,0,0)
+#define sys_getmachine(dst)	sys_getinfo(GET_MACHINE, dst, 0,0,0)
+#define sys_getproctab(dst)	sys_getinfo(GET_PROCTAB, dst, 0,0,0)
+#define sys_getprivtab(dst)	sys_getinfo(GET_PRIVTAB, dst, 0,0,0)
+#define sys_getproc(dst,nr)	sys_getinfo(GET_PROC, dst, 0,0, nr)
+#define sys_getrandomness(dst)	sys_getinfo(GET_RANDOMNESS, dst, 0,0,0)
+#define sys_getimage(dst)	sys_getinfo(GET_IMAGE, dst, 0,0,0)
+#define sys_getirqhooks(dst)	sys_getinfo(GET_IRQHOOKS, dst, 0,0,0)
+#define sys_getirqactids(dst)	sys_getinfo(GET_IRQACTIDS, dst, 0,0,0)
+#define sys_getmonparams(v,vl)	sys_getinfo(GET_MONPARAMS, v,vl, 0,0)
+#define sys_getschedinfo(v1,v2)	sys_getinfo(GET_SCHEDINFO, v1,0, v2,0)
+#define sys_getlocktimings(dst)	sys_getinfo(GET_LOCKTIMING, dst, 0,0,0)
+#define sys_getbiosbuffer(virp, sizep) sys_getinfo(GET_BIOSBUFFER, virp, \
+	sizeof(*virp), sizep, sizeof(*sizep))
+_PROTOTYPE(int sys_getinfo, (int request, void *val_ptr, int val_len,
+				 void *val_ptr2, int val_len2)		);
+
+/* Signal control. */
+_PROTOTYPE(int sys_kill, (int proc, int sig) );
+_PROTOTYPE(int sys_sigsend, (int proc_nr, struct sigmsg *sig_ctxt) ); 
+_PROTOTYPE(int sys_sigreturn, (int proc_nr, struct sigmsg *sig_ctxt) );
+_PROTOTYPE(int sys_getksig, (int *k_proc_nr, sigset_t *k_sig_map) ); 
+_PROTOTYPE(int sys_endksig, (int proc_nr) );
+
+/* NOTE: two different approaches were used to distinguish the device I/O
+ * types 'byte', 'word', 'long': the latter uses #define and results in a
+ * smaller implementation, but loses the static type checking.
+ */
+_PROTOTYPE(int sys_voutb, (pvb_pair_t *pvb_pairs, int nr_ports)		);
+_PROTOTYPE(int sys_voutw, (pvw_pair_t *pvw_pairs, int nr_ports)		);
+_PROTOTYPE(int sys_voutl, (pvl_pair_t *pvl_pairs, int nr_ports)		);
+_PROTOTYPE(int sys_vinb, (pvb_pair_t *pvb_pairs, int nr_ports)		);
+_PROTOTYPE(int sys_vinw, (pvw_pair_t *pvw_pairs, int nr_ports)		);
+_PROTOTYPE(int sys_vinl, (pvl_pair_t *pvl_pairs, int nr_ports)		);
+
+/* Shorthands for sys_out() system call. */
+#define sys_outb(p,v)	sys_out((p), (unsigned long) (v), DIO_BYTE)
+#define sys_outw(p,v)	sys_out((p), (unsigned long) (v), DIO_WORD)
+#define sys_outl(p,v)	sys_out((p), (unsigned long) (v), DIO_LONG)
+_PROTOTYPE(int sys_out, (int port, unsigned long value, int type)	); 
+
+/* Shorthands for sys_in() system call. */
+#define sys_inb(p,v)	sys_in((p), (v), DIO_BYTE)
+#define sys_inw(p,v)	sys_in((p), (v), DIO_WORD)
+#define sys_inl(p,v)	sys_in((p), (v), DIO_LONG)
+_PROTOTYPE(int sys_in, (int port, unsigned long *value, int type)	);
+
+/* pci.c */
+_PROTOTYPE( void pci_init, (void)					);
+_PROTOTYPE( void pci_init1, (char *name)				);
+_PROTOTYPE( int pci_first_dev, (int *devindp, u16_t *vidp, u16_t *didp)	);
+_PROTOTYPE( int pci_next_dev, (int *devindp, u16_t *vidp, u16_t *didp)	);
+_PROTOTYPE( int pci_find_dev, (U8_t bus, U8_t dev, U8_t func,
+							int *devindp)	);
+_PROTOTYPE( void pci_reserve, (int devind)				);
+_PROTOTYPE( void pci_ids, (int devind, u16_t *vidp, u16_t *didp)	);
+_PROTOTYPE( void pci_rescan_bus, (U8_t busnr)				);
+_PROTOTYPE( u8_t pci_attr_r8, (int devind, int port)			);
+_PROTOTYPE( u16_t pci_attr_r16, (int devind, int port)			);
+_PROTOTYPE( u32_t pci_attr_r32, (int devind, int port)			);
+_PROTOTYPE( void pci_attr_w8, (int devind, int port, U8_t value)	);
+_PROTOTYPE( void pci_attr_w16, (int devind, int port, U16_t value)	);
+_PROTOTYPE( void pci_attr_w32, (int devind, int port, u32_t value)	);
+_PROTOTYPE( char *pci_dev_name, (U16_t vid, U16_t did)			);
+_PROTOTYPE( char *pci_slot_name, (int devind)				);
+
+#endif /* _SYSLIB_H */
+

File 2/usr/src/kernel/main.c

+/* This file contains the main program of MINIX as well as its shutdown code.
+ * The routine main() initializes the system and starts the ball rolling by
+ * setting up the process table, interrupt vectors, and scheduling each task 
+ * to run to initialize itself.
+ * The routine shutdown() does the opposite and brings down MINIX. 
+ *
+ * The entries into this file are:
+ *   main:	    	MINIX main program
+ *   prepare_shutdown:	prepare to take MINIX down
+ *
+ * Changes:
+ *   Nov 24, 2004   simplified main() with system image  (Jorrit N. Herder)
+ *   Aug 20, 2004   new prepare_shutdown() and shutdown()  (Jorrit N. Herder)
+ */
+#include "kernel.h"
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <a.out.h>
+#include <minix/callnr.h>
+#include <minix/com.h>
+#include <minix/endpoint.h>
+#include "proc.h"
+
+#define END -50
+/* Prototype declarations for PRIVATE functions. */
+FORWARD _PROTOTYPE( void announce, (void));	
+FORWARD _PROTOTYPE( void shutdown, (timer_t *tp));
+
+/*===========================================================================*
+ *				main                                         *
+ *===========================================================================*/
+PUBLIC void main()
+{
+/* Start the ball rolling. */
+  struct boot_image *ip;	/* boot image pointer */
+  register struct proc *rp;	/* process pointer */
+  register struct priv *sp;	/* privilege structure pointer */
+  register int i, s;		/* NOTE(review): 's' appears unused here */
+  int hdrindex;			/* index to array of a.out headers */
+  phys_clicks text_base;
+  vir_clicks text_clicks, data_clicks;
+  reg_t ktsb;			/* kernel task stack base */
+  struct exec e_hdr;		/* for a copy of an a.out header */
+
+  /* initialization of the subscript which points to the user who is going to
+   * execute a process that belongs to him */
+  next_user = 0;
+  /* initialize the array of users with a value which denotes that we have
+   * reached the end of the list with active users. This is performed for
+   * optimisation reasons */
+  for(i = 0;i < MAX_USERS; i++){
+	list_of_uid[i] = END;
+  }
+
+
+  /* Initialize the interrupt controller. */
+  intr_init(1);
+
+  /* Clear the process table. Announce each slot as empty and set up mappings 
+   * for proc_addr() and proc_nr() macros. Do the same for the table with 
+   * privilege structures for the system processes. 
+   */
+  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
+  	rp->p_rts_flags = SLOT_FREE;		/* initialize free slot */
+	rp->p_nr = i;				/* proc number from ptr */
+	rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
+        (pproc_addr + NR_TASKS)[i] = rp;        /* proc ptr from number */
+  }
+  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
+	sp->s_proc_nr = NONE;			/* initialize as free */
+	sp->s_id = i;				/* priv structure index */
+	ppriv_addr[i] = sp;			/* priv ptr from number */
+  }
+
+  /* Set up proc table entries for processes in boot image.  The stacks of the
+   * kernel tasks are initialized to an array in data space.  The stacks
+   * of the servers have been added to the data segment by the monitor, so
+   * the stack pointer is set to the end of the data segment.  All the
+   * processes are in low memory on the 8086.  On the 386 only the kernel
+   * is in low memory, the rest is loaded in extended memory.
+   */
+
+  /* Task stacks. */
+  ktsb = (reg_t) t_stack;
+
+  for (i=0; i < NR_BOOT_PROCS; ++i) {
+	ip = &image[i];				/* process' attributes */
+	rp = proc_addr(ip->proc_nr);		/* get process pointer */
+	ip->endpoint = rp->p_endpoint;		/* ipc endpoint */
+	rp->p_max_priority = ip->priority;	/* max scheduling priority */
+	rp->p_priority = ip->priority;		/* current priority */
+	rp->p_quantum_size = ip->quantum;	/* quantum size in ticks */
+	rp->p_ticks_left = ip->quantum;		/* current credit */
+	strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
+	(void) get_priv(rp, (ip->flags & SYS_PROC));    /* assign structure */
+	priv(rp)->s_flags = ip->flags;			/* process flags */
+	priv(rp)->s_trap_mask = ip->trap_mask;		/* allowed traps */
+	priv(rp)->s_call_mask = ip->call_mask;		/* kernel call mask */
+	priv(rp)->s_ipc_to.chunk[0] = ip->ipc_to;	/* restrict targets */
+	if (iskerneln(proc_nr(rp))) {		/* part of the kernel? */ 
+		if (ip->stksize > 0) {		/* HARDWARE stack size is 0 */
+			rp->p_priv->s_stack_guard = (reg_t *) ktsb;
+			*rp->p_priv->s_stack_guard = STACK_GUARD;
+		}
+		ktsb += ip->stksize;	/* point to high end of stack */
+		rp->p_reg.sp = ktsb;	/* this task's initial stack ptr */
+		text_base = kinfo.code_base >> CLICK_SHIFT;
+					/* processes that are in the kernel */
+					/* NOTE(review): this text_base value
+					 * is overwritten unconditionally by
+					 * the a.out conversion below --
+					 * confirm that is intended. */
+		hdrindex = 0;		/* all use the first a.out header */
+	} else {
+		hdrindex = 1 + i-NR_TASKS;	/* servers, drivers, INIT */
+	}
+
+	/* The bootstrap loader created an array of the a.out headers at
+	 * absolute address 'aout'. Get one element to e_hdr.
+	 */
+	phys_copy(aout + hdrindex * A_MINHDR, vir2phys(&e_hdr),
+						(phys_bytes) A_MINHDR);
+	/* Convert addresses to clicks and build process memory map */
+	/* NOTE(review): a_syms is used as the physical load base here --
+	 * presumably patched in by the boot monitor; confirm. */
+	text_base = e_hdr.a_syms >> CLICK_SHIFT;
+	text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
+	if (!(e_hdr.a_flags & A_SEP)) text_clicks = 0;	   /* common I&D */
+	data_clicks = (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
+	rp->p_memmap[T].mem_phys = text_base;
+	rp->p_memmap[T].mem_len  = text_clicks;
+	rp->p_memmap[D].mem_phys = text_base + text_clicks;
+	rp->p_memmap[D].mem_len  = data_clicks;
+	rp->p_memmap[S].mem_phys = text_base + text_clicks + data_clicks;
+	rp->p_memmap[S].mem_vir  = data_clicks;	/* empty - stack is in data */
+
+	/* Set initial register values.  The processor status word for tasks 
+	 * is different from that of other processes because tasks can
+	 * access I/O; this is not allowed to less-privileged processes 
+	 */
+	rp->p_reg.pc = (reg_t) ip->initial_pc;
+	rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;
+
+	/* Initialize the server stack pointer. Take it down one word
+	 * to give crtso.s something to use as "argc".
+	 */
+	if (isusern(proc_nr(rp))) {		/* user-space process? */ 
+		rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
+				rp->p_memmap[S].mem_len) << CLICK_SHIFT;
+		rp->p_reg.sp -= sizeof(reg_t);
+	}
+	
+	/* Set ready. The HARDWARE task is never ready. */
+	if (rp->p_nr != HARDWARE) {
+		rp->p_rts_flags = 0;		/* runnable if no flags */
+		lock_enqueue(rp);		/* add to scheduling queues */
+	} else {
+		rp->p_rts_flags = NO_MAP;	/* prevent from running */
+	}
+
+	/* Code and data segments must be allocated in protected mode. */
+	alloc_segments(rp);
+  }
+
+#if ENABLE_BOOTDEV 
+  /* Expect an image of the boot device to be loaded into memory as well. 
+   * The boot device is the last module that is loaded into memory, and, 
+   * for example, can contain the root FS (useful for embedded systems). 
+   */
+  hdrindex ++;
+  phys_copy(aout + hdrindex * A_MINHDR,vir2phys(&e_hdr),(phys_bytes) A_MINHDR);
+  if (e_hdr.a_flags & A_IMG) {
+  	kinfo.bootdev_base = e_hdr.a_syms; 
+  	kinfo.bootdev_size = e_hdr.a_data; 
+  }
+#endif
+
+  /* MINIX is now ready. All boot image processes are on the ready queue.
+   * Return to the assembly code to start running the current process. 
+   */
+  bill_ptr = proc_addr(IDLE);		/* it has to point somewhere */
+  announce();				/* print MINIX startup banner */
+  restart();
+}
+
+/*===========================================================================*
+ *				announce				     *
+ *===========================================================================*/
+PRIVATE void announce(void)
+{
+  /* Display the MINIX startup banner. */
+  kprintf("\nMINIX %s.%s. "
+      "Copyright 2006, Vrije Universiteit, Amsterdam, The Netherlands\n",
+      OS_RELEASE, OS_VERSION);
+#if (CHIP == INTEL)
+  /* Real mode, or 16/32-bit protected mode?  machine.prot is presumably set
+   * during low-level startup -- nonzero selects the protected-mode banner. */
+  kprintf("Executing in %s mode.\n\n",
+      machine.prot ? "32-bit protected" : "real");
+#endif
+}
+
+/*===========================================================================*
+ *				prepare_shutdown			     *
+ *===========================================================================*/
+PUBLIC void prepare_shutdown(how)
+int how;		/* shutdown code, e.g. RBT_HALT/RBT_MONITOR/RBT_RESET */
+{
+/* This function prepares to shutdown MINIX.  It arms a watchdog timer so
+ * that shutdown() runs one second from now; the shutdown code 'how' is
+ * passed to shutdown() through the timer argument.
+ */
+  static timer_t shutdown_timer;	/* static: must outlive this call */
+#if DEAD_CODE
+  register struct proc *rp;		/* only used by the disabled loop */
+#endif
+
+  /* Send a signal to all system processes that are still alive to inform 
+   * them that the MINIX kernel is shutting down. A proper shutdown sequence
+   * should be implemented by a user-space server. This mechanism is useful
+   * as a backup in case of system panics, so that system processes can still
+   * run their shutdown code, e.g, to synchronize the FS or to let the TTY
+   * switch to the first console. 
+   */
+#if DEAD_CODE
+  kprintf("Sending SIGKSTOP to system processes ...\n"); 
+  for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
+      if (!isemptyp(rp) && (priv(rp)->s_flags & SYS_PROC) && !iskernelp(rp))
+          send_sig(proc_nr(rp), SIGKSTOP);
+  }
+#endif
+
+  /* Continue after 1 second, to give processes a chance to get scheduled to 
+   * do shutdown work.  Set a watchdog timer to call shutdown(). The timer 
+   * argument passes the shutdown status. 
+   */
+  kprintf("MINIX will now be shut down ...\n");
+  tmr_arg(&shutdown_timer)->ta_int = how;
+  set_timer(&shutdown_timer, get_uptime() + HZ, shutdown);
+}
+/*===========================================================================*
+ *				shutdown 				     *
+ *===========================================================================*/
+PRIVATE void shutdown(tp)
+timer_t *tp;		/* expired timer; its argument holds the shutdown code */
+{
+/* This function is called from prepare_shutdown or stop_sequence to bring 
+ * down MINIX. How to shutdown is in the argument: RBT_HALT (return to the
+ * monitor), RBT_MONITOR (execute given code), RBT_RESET (hard reset). 
+ */
+  int how = tmr_arg(tp)->ta_int;
+  u16_t magic; 
+
+  /* Now mask all interrupts, including the clock, and stop the clock. */
+  outb(INT_CTLMASK, ~0); 
+  clock_stop();
+
+  /* NOTE(review): mon_return nonzero presumably means the boot monitor is
+   * still resident and can be returned to -- confirm. */
+  if (mon_return && how != RBT_RESET) {
+	/* Reinitialize the interrupt controllers to the BIOS defaults. */
+	intr_init(0);
+	outb(INT_CTLMASK, 0);
+	outb(INT2_CTLMASK, 0);
+
+	/* Return to the boot monitor. Set the program if not already done. */
+	if (how != RBT_MONITOR) phys_copy(vir2phys(""), kinfo.params_base, 1); 
+	level0(monitor);
+  }
+
+  /* Reset the system by jumping to the reset address (real mode), or by
+   * forcing a processor shutdown (protected mode). First stop the BIOS 
+   * memory test by setting a soft reset flag. 
+   */
+  magic = STOP_MEM_CHECK;
+  phys_copy(vir2phys(&magic), SOFT_RESET_FLAG_ADDR, SOFT_RESET_FLAG_SIZE);
+  level0(reset);
+}
+

File 2/usr/src/kernel/proc.c

+/* This file contains essentially all of the process and message handling.
+ * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
+ * There is one entry point from the outside:
+ *
+ *   sys_call: 	      a system call, i.e., the kernel is trapped with an INT
+ *
+ * As well as several entry points used from the interrupt and task level:
+ *
+ *   lock_notify:     notify a process of a system event
+ *   lock_send:	      send a message to a process
+ *   lock_enqueue:    put a process on one of the scheduling queues
+ *   lock_dequeue:    remove a process from the scheduling queues
+ *
+ * Changes:
+ *   Aug 19, 2005     rewrote scheduling code  (Jorrit N. Herder)
+ *   Jul 25, 2005     rewrote system call handling  (Jorrit N. Herder)
+ *   May 26, 2005     rewrote message passing functions  (Jorrit N. Herder)
+ *   May 24, 2005     new notification system call  (Jorrit N. Herder)
+ *   Oct 28, 2004     nonblocking send and receive calls  (Jorrit N. Herder)
+ *
+ * The code here is critical to make everything work and is important for the
+ * overall performance of the system. A large fraction of the code deals with
+ * list manipulation. To make this both easy to understand and fast to execute
+ * pointer pointers are used throughout the code. Pointer pointers prevent
+ * exceptions for the head or tail of a linked list.
+ *
+ *  node_t *queue, *new_node;	// assume these as global variables
+ *  node_t **xpp = &queue; 	// get pointer pointer to head of queue
+ *  while (*xpp != NULL) 	// find last pointer of the linked list
+ *      xpp = &(*xpp)->next;	// get pointer to next pointer
+ *  *xpp = new_node;		// now replace the end (the NULL pointer)
+ *  new_node->next = NULL;	// and mark the new end of the list
+ *
+ * For example, when adding a new node to the end of the list, one normally
+ * makes an exception for an empty list and looks up the end of the list for
+ * nonempty lists. As shown above, this is not required with pointer pointers.
+ */
+
+#include <minix/com.h>
+#include <minix/callnr.h>
+#include <minix/endpoint.h>
+#include "debug.h"
+#include "kernel.h"
+#include <signal.h>
+#include "users_list.h"
+
+/* Scheduling and message passing functions. The functions are available to
+ * other parts of the kernel through lock_...(). The lock temporarily disables
+ * interrupts to prevent race conditions.
+ */
+FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
+		message *m_ptr, unsigned flags));
+FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
+		message *m_ptr, unsigned flags));
+FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst));
+FORWARD _PROTOTYPE( int deadlock, (int function,
+		register struct proc *caller, int src_dst));
+FORWARD _PROTOTYPE( void enqueue, (struct proc *rp));
+FORWARD _PROTOTYPE( void dequeue, (struct proc *rp));
+FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
+FORWARD _PROTOTYPE( void pick_proc, (void));
+/* functions to help us in fair schedule */
+FORWARD _PROTOTYPE( struct proc* search_proc, (struct proc *head, int uid));
+FORWARD _PROTOTYPE( struct proc* swap_head, (struct proc *head, struct proc *node_to_swap));
+FORWARD _PROTOTYPE( struct proc* search_swap, (struct proc *head, int uid));
+
+#define BuildMess(m_ptr, src, dst_ptr) \
+	(m_ptr)->m_source = proc_addr(src)->p_endpoint;		\
+	(m_ptr)->m_type = NOTIFY_FROM(src);				\
+	(m_ptr)->NOTIFY_TIMESTAMP = get_uptime();			\
+	switch (src) {							\
+	case HARDWARE:							\
+		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending;	\
+		priv(dst_ptr)->s_int_pending = 0;			\
+		break;							\
+	case SYSTEM:							\
+		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending;	\
+		priv(dst_ptr)->s_sig_pending = 0;			\
+		break;							\
+	}
+
+#if (CHIP == INTEL)
+/* CopyMess(s, sp, sm, dp, dm) copies message 'sm' of process 'sp' into
+ * message buffer 'dm' of process 'dp'.  's' is the source's process number,
+ * used only to look up its endpoint for the m_source field.  The actual
+ * copy is done by cp_mess() using the physical base addresses of both
+ * processes' data segments.
+ */
+#define CopyMess(s,sp,sm,dp,dm) \
+	cp_mess(proc_addr(s)->p_endpoint, \
+		(sp)->p_memmap[D].mem_phys,	\
+		(vir_bytes)sm, (dp)->p_memmap[D].mem_phys, (vir_bytes)dm)
+#endif /* (CHIP == INTEL) */
+
+#if (CHIP == M68000)
+/* M68000 does not have cp_mess() in assembly like INTEL. Declare prototype
+ * for cp_mess() here and define the function below. Also define CopyMess.
+ */
+#endif /* (CHIP == M68000) */
+
+/*===========================================================================*
+ *				sys_call				     *
+ *===========================================================================*/
+PUBLIC int sys_call(call_nr, src_dst_e, m_ptr, bit_map)
+int call_nr;			/* system call number and flags */
+int src_dst_e;			/* src to receive from or dst to send to */
+message *m_ptr;			/* pointer to message in the caller's space */
+long bit_map;			/* notification event set or flags */
+{
+/* System calls are done by trapping to the kernel with an INT instruction.
+ * The trap is caught and sys_call() is called to send or receive a message
+ * (or both). The caller is always given by 'proc_ptr'.
+ */
+  register struct proc *caller_ptr = proc_ptr;	/* get pointer to caller */
+  int function = call_nr & SYSCALL_FUNC;	/* get system call function */
+  unsigned flags = call_nr & SYSCALL_FLAGS;	/* get flags */
+  int group_size;				/* used for deadlock check */
+  int result;					/* the system call's result */
+  int src_dst;					/* proc slot of src/dst */
+  vir_clicks vlo, vhi;		/* virtual clicks containing message to send */
+
+#if 0
+  if (caller_ptr->p_rts_flags & SLOT_FREE)
+  {
+	kprintf("called by the dead?!?\n");
+	return EINVAL;
+  }
+#endif
+
+  /* Require a valid source and/ or destination process, unless echoing. */
+  if (src_dst_e != ANY && function != ECHO) {
+      if(!isokendpt(src_dst_e, &src_dst)) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+          kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
+              function, proc_nr(caller_ptr), src_dst_e);
+#endif
+	  return EDEADSRCDST;
+      }
+  } else src_dst = src_dst_e;
+
+  /* Check if the process has privileges for the requested call. Calls to the
+   * kernel may only be SENDREC, because tasks always reply and may not block
+   * if the caller doesn't do receive().
+   */
+  if (! (priv(caller_ptr)->s_trap_mask & (1 << function)) ||
+          (iskerneln(src_dst) && function != SENDREC
+           && function != RECEIVE)) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+      kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
+          function, proc_nr(caller_ptr), src_dst);
+#endif
+      return(ETRAPDENIED);		/* trap denied by mask or kernel */
+  }
+
+  /* If the call involves a message buffer, i.e., for SEND, RECEIVE, SENDREC,
+   * or ECHO, check the message pointer. This check allows a message to be
+   * anywhere in data or stack or gap. It will have to be made more elaborate
+   * for machines which don't have the gap mapped.
+   */
+  if (function & CHECK_PTR) {
+      vlo = (vir_bytes) m_ptr >> CLICK_SHIFT;
+      vhi = ((vir_bytes) m_ptr + MESS_SIZE - 1) >> CLICK_SHIFT;
+      if (vlo < caller_ptr->p_memmap[D].mem_vir || vlo > vhi ||
+              vhi >= caller_ptr->p_memmap[S].mem_vir +
+              caller_ptr->p_memmap[S].mem_len) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+          kprintf("sys_call: invalid message pointer, trap %d, caller %d\n",
+          	function, proc_nr(caller_ptr));
+#endif
+          return(EFAULT); 		/* invalid message pointer */
+      }
+  }
+
+  /* If the call is to send to a process, i.e., for SEND, SENDREC or NOTIFY,
+   * verify that the caller is allowed to send to the given destination.
+   */
+  if (function & CHECK_DST) {
+      if (! get_sys_bit(priv(caller_ptr)->s_ipc_to, nr_to_id(src_dst))) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+          kprintf("sys_call: ipc mask denied trap %d from %d to %d\n",
+          	function, proc_nr(caller_ptr), src_dst);
+#endif
+          return(ECALLDENIED);		/* call denied by ipc mask */
+      }
+  }
+
+  /* Check for a possible deadlock for blocking SEND(REC) and RECEIVE. */
+  if (function & CHECK_DEADLOCK) {
+      /* Assignment intended; parenthesized to make that explicit. */
+      if ((group_size = deadlock(function, caller_ptr, src_dst)) != 0) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+          kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
+              function, proc_nr(caller_ptr), src_dst, group_size);
+#endif
+          return(ELOCKED);
+      }
+  }
+
+  /* Now check if the call is known and try to perform the request. The only
+   * system calls that exist in MINIX are sending and receiving messages.
+   *   - SENDREC: combines SEND and RECEIVE in a single system call
+   *   - SEND:    sender blocks until its message has been delivered
+   *   - RECEIVE: receiver blocks until an acceptable message has arrived
+   *   - NOTIFY:  nonblocking call; deliver notification or mark pending
+   *   - ECHO:    nonblocking call; directly echo back the message
+   */
+  switch(function) {
+  case SENDREC:
+      /* A flag is set so that notifications cannot interrupt SENDREC. */
+      caller_ptr->p_misc_flags |= REPLY_PENDING;
+      /* fall through */
+  case SEND:
+      result = mini_send(caller_ptr, src_dst_e, m_ptr, flags);
+      if (function == SEND || result != OK) {
+          break;				/* done, or SEND failed */
+      }						/* fall through for SENDREC */
+  case RECEIVE:
+      if (function == RECEIVE)
+          caller_ptr->p_misc_flags &= ~REPLY_PENDING;
+      result = mini_receive(caller_ptr, src_dst_e, m_ptr, flags);
+      break;
+  case NOTIFY:
+      result = mini_notify(caller_ptr, src_dst);
+      break;
+  case ECHO:
+      CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, caller_ptr, m_ptr);
+      result = OK;
+      break;
+  default:
+      result = EBADCALL;			/* illegal system call */
+  }
+
+  /* Now, return the result of the system call to the caller. */
+  return(result);
+}
+
+/*===========================================================================*
+ *				deadlock				     *
+ *===========================================================================*/
+PRIVATE int deadlock(function, cp, src_dst)
+int function;					/* trap number */
+register struct proc *cp;			/* pointer to caller */
+int src_dst;					/* src or dst process */
+{
+/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
+ * a cyclic dependency of blocking send and receive calls. The only cyclic
+ * dependency that is not fatal is if the caller and target directly SEND(REC)
+ * and RECEIVE to each other. If a deadlock is found, the group size is
+ * returned. Otherwise zero is returned.
+ */
+  register struct proc *xp;			/* process pointer */
+  int group_size = 1;				/* start with only caller */
+
+  while (src_dst != ANY) { 			/* check while process nr */
+      xp = proc_addr(src_dst);			/* follow chain of processes */
+      group_size ++;				/* extra process in group */
+
+      /* Check whether the last process in the chain has a dependency. If it
+       * has not, the cycle cannot be closed and we are done.
+       */
+      if (xp->p_rts_flags & RECEIVING) {	/* xp has dependency */
+	  if(xp->p_getfrom_e == ANY) src_dst = ANY;
+	  else okendpt(xp->p_getfrom_e, &src_dst);
+      } else if (xp->p_rts_flags & SENDING) {	/* xp has dependency */
+	  okendpt(xp->p_sendto_e, &src_dst);
+      } else {
+	  return(0);				/* not a deadlock */
+      }
+
+      /* Now check if there is a cyclic dependency. For group sizes of two,
+       * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
+       * or other combinations indicate a deadlock.
+       */
+      if (src_dst == proc_nr(cp)) {		/* possible deadlock */
+	  if (group_size == 2) {		/* caller and src_dst */
+	      /* The function number is magically converted to flags:
+	       * presumably the trap numbers are chosen so that shifting
+	       * the function code left by two yields the matching RTS
+	       * flag bit -- confirm against the IPC constants. */
+	      if ((xp->p_rts_flags ^ (function << 2)) & SENDING) {
+	          return(0);			/* not a deadlock */
+	      }
+	  }
+          return(group_size);			/* deadlock found */
+      }
+  }
+  return(0);					/* not a deadlock */
+}
+
+/*===========================================================================*
+ *				mini_send				     *
+ *===========================================================================*/
+PRIVATE int mini_send(caller_ptr, dst_e, m_ptr, flags)
+register struct proc *caller_ptr;	/* who is trying to send a message? */
+int dst_e;				/* to whom is message being sent? */
+message *m_ptr;				/* pointer to message buffer */
+unsigned flags;				/* system call flags */
+{
+/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
+ * for this message, copy the message to it and unblock 'dst'. If 'dst' is
+ * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
+ * Returns OK, EDSTDIED if the destination lost its endpoint, or ENOTREADY
+ * for a nonblocking send to a destination that is not waiting.
+ */
+  register struct proc *dst_ptr;
+  register struct proc **xpp;
+  int dst_p;
+
+  dst_p = _ENDPOINT_P(dst_e);
+  dst_ptr = proc_addr(dst_p);
+
+  if (dst_ptr->p_rts_flags & NO_ENDPOINT) return EDSTDIED;
+
+  /* Check if 'dst' is blocked waiting for this message. The destination's
+   * SENDING flag may be set when its SENDREC call blocked while sending.
+   */
+  if ( (dst_ptr->p_rts_flags & (RECEIVING | SENDING)) == RECEIVING &&
+       (dst_ptr->p_getfrom_e == ANY
+         || dst_ptr->p_getfrom_e == caller_ptr->p_endpoint)) {
+	/* Destination is indeed waiting for this message. */
+	CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
+		 dst_ptr->p_messbuf);
+	if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) enqueue(dst_ptr);
+  } else if ( ! (flags & NON_BLOCKING)) {
+	/* Destination is not waiting.  Block and dequeue caller. */
+	caller_ptr->p_messbuf = m_ptr;
+	if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
+	caller_ptr->p_rts_flags |= SENDING;
+	caller_ptr->p_sendto_e = dst_e;
+
+	/* Process is now blocked.  Put it on the destination's queue. */
+	xpp = &dst_ptr->p_caller_q;		/* find end of list */
+	while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
+	*xpp = caller_ptr;			/* add caller to end */
+	caller_ptr->p_q_link = NIL_PROC;	/* mark new end of list */
+  } else {
+	return(ENOTREADY);
+  }
+  return(OK);
+}
+
+/*===========================================================================*
+ *				mini_receive				     *
+ *===========================================================================*/
+PRIVATE int mini_receive(caller_ptr, src_e, m_ptr, flags)
+register struct proc *caller_ptr;	/* process trying to get message */
+int src_e;				/* which message source is wanted */
+message *m_ptr;				/* pointer to message buffer */
+unsigned flags;				/* system call flags */
+{
+/* A process or task wants to get a message.  If a message is already queued,
+ * acquire it and deblock the sender.  If no message from the desired source
+ * is available block the caller, unless the flags don't allow blocking.
+ * Returns OK, ESRCDIED if the source lost its endpoint, or ENOTREADY for a
+ * nonblocking receive with nothing available.
+ */
+  register struct proc **xpp;
+  message m;
+  sys_map_t *map;
+  bitchunk_t *chunk;
+  int i, src_id, src_proc_nr, src_p;
+
+  if(src_e == ANY) src_p = ANY;
+  else
+  {
+	okendpt(src_e, &src_p);
+	if (proc_addr(src_p)->p_rts_flags & NO_ENDPOINT) return ESRCDIED;
+  }
+
+
+  /* Check to see if a message from desired source is already available.
+   * The caller's SENDING flag may be set if SENDREC couldn't send. If it is
+   * set, the process should be blocked.
+   */
+  if (!(caller_ptr->p_rts_flags & SENDING)) {
+
+    /* Check if there are pending notifications, except for SENDREC. */
+    if (! (caller_ptr->p_misc_flags & REPLY_PENDING)) {
+
+        map = &priv(caller_ptr)->s_notify_pending;
+        for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {
+
+            /* Find a pending notification from the requested source. */
+            if (! *chunk) continue; 			/* no bits in chunk */
+            for (i=0; ! (*chunk & (1<<i)); ++i) {} 	/* look up the bit */
+            src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
+            if (src_id >= NR_SYS_PROCS) break;		/* out of range */
+            src_proc_nr = id_to_nr(src_id);		/* get source proc */
+#if DEBUG_ENABLE_IPC_WARNINGS
+	    if(src_proc_nr == NONE) {
+		kprintf("mini_receive: sending notify from NONE\n");
+	    }
+#endif
+            if (src_e!=ANY && src_p != src_proc_nr) continue;/* source not ok */
+            *chunk &= ~(1 << i);			/* no longer pending */
+
+            /* Found a suitable source, deliver the notification message. */
+	    BuildMess(&m, src_proc_nr, caller_ptr);	/* assemble message */
+            CopyMess(src_proc_nr, proc_addr(HARDWARE), &m, caller_ptr, m_ptr);
+            return(OK);					/* report success */
+        }
+    }
+
+    /* Check caller queue. Use pointer pointers to keep code simple. */
+    xpp = &caller_ptr->p_caller_q;
+    while (*xpp != NIL_PROC) {
+        if (src_e == ANY || src_p == proc_nr(*xpp)) {
+#if 0
+	    if ((*xpp)->p_rts_flags & SLOT_FREE)
+	    {
+		kprintf("listening to the dead?!?\n");
+		return EINVAL;
+	    }
+#endif
+
+	    /* Found acceptable message. Copy it and update status. */
+	    CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr);
+            if (((*xpp)->p_rts_flags &= ~SENDING) == 0) enqueue(*xpp);
+            *xpp = (*xpp)->p_q_link;		/* remove from queue */
+            return(OK);				/* report success */
+	}
+	xpp = &(*xpp)->p_q_link;		/* proceed to next */
+    }
+  }
+
+  /* No suitable message is available or the caller couldn't send in SENDREC.
+   * Block the process trying to receive, unless the flags tell otherwise.
+   */
+  if ( ! (flags & NON_BLOCKING)) {
+      caller_ptr->p_getfrom_e = src_e;
+      caller_ptr->p_messbuf = m_ptr;
+      if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
+      caller_ptr->p_rts_flags |= RECEIVING;
+      return(OK);
+  } else {
+      return(ENOTREADY);
+  }
+}
+
+/*===========================================================================*
+ *				mini_notify				     *
+ *===========================================================================*/
+PRIVATE int mini_notify(caller_ptr, dst)
+register struct proc *caller_ptr;	/* sender of the notification */
+int dst;				/* which process to notify */
+{
+/* Deliver a notification to 'dst' immediately if it is waiting, otherwise
+ * mark it pending in the destination's bitmap.  Always returns OK: notify
+ * never blocks the sender.
+ */
+  register struct proc *dst_ptr = proc_addr(dst);
+  int src_id;				/* source id for late delivery */
+  message m;				/* the notification message */
+
+  /* Check to see if target is blocked waiting for this message. A process
+   * can be both sending and receiving during a SENDREC system call.
+   */
+  if ((dst_ptr->p_rts_flags & (RECEIVING|SENDING)) == RECEIVING &&
+      ! (dst_ptr->p_misc_flags & REPLY_PENDING) &&
+      (dst_ptr->p_getfrom_e == ANY ||
+      dst_ptr->p_getfrom_e == caller_ptr->p_endpoint)) {
+
+      /* Destination is indeed waiting for a message. Assemble a notification
+       * message and deliver it. Copy from pseudo-source HARDWARE, since the
+       * message is in the kernel's address space.
+       */
+      BuildMess(&m, proc_nr(caller_ptr), dst_ptr);
+      CopyMess(proc_nr(caller_ptr), proc_addr(HARDWARE), &m,
+          dst_ptr, dst_ptr->p_messbuf);
+      dst_ptr->p_rts_flags &= ~RECEIVING;	/* deblock destination */
+      if (dst_ptr->p_rts_flags == 0) enqueue(dst_ptr);
+      return(OK);
+  }
+
+  /* Destination is not ready to receive the notification. Add it to the
+   * bit map with pending notifications. Note the indirectness: the system id
+   * instead of the process number is used in the pending bit map.
+   */
+  src_id = priv(caller_ptr)->s_id;
+  set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
+  return(OK);
+}
+
+/*===========================================================================*
+ *				lock_notify				     *
+ *===========================================================================*/
+PUBLIC int lock_notify(src_e, dst_e)
+int src_e;			/* (endpoint) sender of the notification */
+int dst_e;			/* (endpoint) who is to be notified */
+{
+/* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
+ * is explicitly given to prevent confusion about where the call comes from.
+ * The MINIX kernel is not reentrant, which means that interrupts are disabled
+ * after the first kernel entry (hardware interrupt, trap, or exception).
+ * Locking is done by temporarily disabling interrupts.
+ */
+  int result, src, dst;
+
+  if(!isokendpt(src_e, &src) || !isokendpt(dst_e, &dst))
+	return EDEADSRCDST;
+
+  /* Exception or interrupt occurred, thus already locked. */
+  if (k_reenter >= 0) {
+      result = mini_notify(proc_addr(src), dst);
+  }
+
+  /* Call from task level, locking is required. */
+  else {
+      lock(0, "notify");
+      result = mini_notify(proc_addr(src), dst);
+      unlock(0);
+  }
+  return(result);
+}
+
+/*===========================================================================*
+ *				enqueue					     *
+ *===========================================================================*/
+PRIVATE void enqueue(rp)
+register struct proc *rp;	/* this process is now runnable */
+{
+/* Add 'rp' to one of the queues of runnable processes.  This function is
+ * responsible for inserting a process into one of the scheduling queues.
+ * The mechanism is implemented here.   The actual scheduling policy is
+ * defined in sched() and pick_proc().
+ */
+  int q;	 				/* scheduling queue to use */
+  int front;					/* add to front or back */
+
+#if DEBUG_SCHED_CHECK
+  check_runqueues("enqueue");
+  if (rp->p_ready) kprintf("enqueue() already ready process\n");
+#endif
+
+  /* Determine where to insert the process. */
+  sched(rp, &q, &front);
+
+  /* Now add the process to the queue. */
+  if (rdy_head[q] == NIL_PROC) {		/* add to empty queue */
+      rdy_head[q] = rdy_tail[q] = rp; 		/* create a new queue */
+      rp->p_nextready = NIL_PROC;		/* mark new end */
+  }
+  else if (front) {				/* add to head of queue */
+      rp->p_nextready = rdy_head[q];		/* chain head of queue */
+      rdy_head[q] = rp;				/* set new queue head */
+  }
+  else {					/* add to tail of queue */
+      rdy_tail[q]->p_nextready = rp;		/* chain tail of queue */
+      rdy_tail[q] = rp;				/* set new queue tail */
+      rp->p_nextready = NIL_PROC;		/* mark new end */
+  }
+
+  if(rp->p_uid>=0)
+  {
+	/* Fair-scheduling bookkeeping: presumably registers the owning user
+	 * in the active-user list (users_list.h) -- confirm against the
+	 * search_add() implementation. */
+	search_add(rp->p_uid);
+  }
+
+  /* Now select the next process to run. */
+  pick_proc();
+
+#if DEBUG_SCHED_CHECK
+  rp->p_ready = 1;
+  check_runqueues("enqueue");
+#endif
+}
+
+/*===========================================================================*
+ *				dequeue					     *
+ *===========================================================================*/
+PRIVATE void dequeue(rp)
+register struct proc *rp;	/* this process is no longer runnable */
+{
+/* A process must be removed from the scheduling queues, for example, because
+ * it has blocked.  If the currently active process is removed, a new process
+ * is picked to run by calling pick_proc().
+ */
+  register int q = rp->p_priority;		/* queue to use */
+  register struct proc **xpp;			/* iterate over queue */
+  register struct proc *prev_xp;
+
+  /* Side-effect for kernel tasks: verify the stack guard word is intact,
+   * i.e. the task has not overrun its stack. */
+  if (iskernelp(rp)) {
+	if (*priv(rp)->s_stack_guard != STACK_GUARD)
+		panic("stack overrun by task", proc_nr(rp));
+  }
+
+#if DEBUG_SCHED_CHECK
+  check_runqueues("dequeue");
+  if (! rp->p_ready) kprintf("dequeue() already unready process\n");
+#endif
+
+  /* Now make sure that the process is not in its ready queue. Remove the
+   * process if it is found. A process can be made unready even if it is not
+   * running by being sent a signal that kills it.
+   */
+  prev_xp = NIL_PROC;
+  for (xpp = &rdy_head[q]; *xpp != NIL_PROC; xpp = &(*xpp)->p_nextready) {
+
+      if (*xpp == rp) {				/* found process to remove */
+          *xpp = (*xpp)->p_nextready;		/* replace with next chain */
+          if (rp == rdy_tail[q])		/* queue tail removed */
+              rdy_tail[q] = prev_xp;		/* set new tail */
+          if (rp == proc_ptr || rp == next_ptr)	/* active process removed */
+              pick_proc();			/* pick new process to run */
+          break;
+      }
+      prev_xp = *xpp;				/* save previous in chain */
+  }
+
+#if DEBUG_SCHED_CHECK
+  rp->p_ready = 0;
+  check_runqueues("dequeue");
+#endif
+}
+
+/*===========================================================================*
+ *				sched					     *
+ *===========================================================================*/
+PRIVATE void sched(rp, queue, front)
+register struct proc *rp;			/* process to be scheduled */
+int *queue;					/* return: queue to use */
+int *front;					/* return: front or back */
+{
+/* Decide where 'rp' should be inserted into the ready queues.  Policy: a
+ * process that still has quantum left goes to the front of its current
+ * priority queue so it can continue at once; a process whose quantum is
+ * used up receives a fresh quantum, is demoted one priority level (unless
+ * it already sits just above the IDLE queue), and goes to the back.
+ * As a side-effect the process' priority may thus be updated.
+ */
+  register int has_time = (rp->p_ticks_left > 0);	/* quantum not used up */
+
+  if (!has_time) {
+      /* Quantum consumed: refill it, and demote one level, but never
+       * into the IDLE queue itself.
+       */
+      rp->p_ticks_left = rp->p_quantum_size;
+      if (rp->p_priority < (IDLE_Q-1))
+          rp->p_priority += 1;
+  }
+
+  /* The queue is simply the (possibly just lowered) current priority;
+   * insertion at the front happens only when time was left.
+   */
+  *queue = rp->p_priority;
+  *front = has_time;
+}
+
+/*===========================================================================*
+ *				pick_proc				     *
+ *===========================================================================*/
+PRIVATE void pick_proc()
+{
+/* Decide who to run now.  A new process is selected by setting 'next_ptr'.
+ * When a billable process is selected, record it in 'bill_ptr', so that the
+ * clock task can tell who to bill for system time.
+ *
+ * System queues (priorities above USER_Q) are served strictly in priority
+ * order.  The user queues are then scanned for a process belonging to the
+ * user whose turn it is (list_of_uid[next_user]); IDLE runs only when
+ * nothing else is ready.
+ */
+  register struct proc *rp;			/* process to run */
+  int q;					/* iterate over queues */
+  struct proc *temp;  
+  int j;
+
+  /* Check each of the system scheduling queues for ready processes. The
+   * number of queues is defined in proc.h, and priorities are set in the
+   * task table.
+   */
+  for (q=0; q < USER_Q; q++) {
+      if ( (rp = rdy_head[q]) != NIL_PROC) {
+          next_ptr = rp;			/* run process 'rp' next */
+          if (priv(rp)->s_flags & BILLABLE)
+              bill_ptr = rp;			/* bill for system time */
+          return;
+      }
+  }
+
+  /* Look for a runnable process of the current user, trying at most 50
+   * users before falling through to IDLE.
+   */
+  for(j=0;j<50;j++)
+  {
+  	for (q=USER_Q; q < NR_SCHED_QUEUES -1;q++)
+  	{
+		if ((rp = rdy_head[q]) != NIL_PROC)
+		{
+			temp = search_swap(rp,list_of_uid[next_user]);
+			if(temp != NIL_PROC)
+			{
+				rdy_head[q] = temp;	/* found: new queue head */
+				rp = temp;
+				next_ptr = rp;
+				if (priv(rp)->s_flags & BILLABLE)
+					bill_ptr = rp;
+				next_user_function();	/* advance round robin */
+				return;
+			}
+		}
+	}
+	/* BUG FIX: 'rp' is NIL_PROC here whenever the last user queue
+	 * scanned (q == NR_SCHED_QUEUES-2) is empty -- the old code then
+	 * dereferenced a null pointer.  Guard before using it.
+	 * NOTE(review): removing rp->p_uid (uid of the last queue head
+	 * examined) rather than list_of_uid[next_user] looks suspicious;
+	 * confirm the intended policy.
+	 */
+	if (rp != NIL_PROC)
+		remove_from_list(rp->p_uid);
+	next_user_function();
+  }
+
+  /* Nothing but IDLE is runnable. */
+  if((rp=rdy_head[IDLE_Q])!=NIL_PROC){
+  	next_ptr = rp;
+  	if (priv(rp)->s_flags & BILLABLE)
+		bill_ptr = rp;
+  	return;
+  }
+}
+
+/*===========================================================================*
+ *				balance_queues				     *
+ *===========================================================================*/
+#define Q_BALANCE_TICKS	 100	/* minimum ticks between balancing runs */
+PUBLIC void balance_queues(tp)
+timer_t *tp;					/* watchdog timer pointer */
+{
+/* Check entire process table and give all process a higher priority. This
+ * effectively means giving a new quantum. If a process already is at its
+ * maximum priority, its quantum will be renewed.
+ */
+  static timer_t queue_timer;			/* timer structure to use */
+  register struct proc* rp;			/* process table pointer  */
+  clock_t next_period;				/* time of next period  */
+  int ticks_added = 0;				/* total time added */
+
+  for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
+      if (! isemptyp(rp)) {				/* check slot use */
+	  lock(5,"balance_queues");	/* queues must not change underneath */
+	  if (rp->p_priority > rp->p_max_priority) {	/* update priority? */
+	      if (rp->p_rts_flags == 0) dequeue(rp);	/* take off queue */
+	      ticks_added += rp->p_quantum_size;	/* do accounting */
+	      rp->p_priority -= 1;			/* raise priority (lower number) */
+	      if (rp->p_rts_flags == 0) enqueue(rp);	/* put on queue */
+	  }
+	  else {
+	      ticks_added += rp->p_quantum_size - rp->p_ticks_left;
+              rp->p_ticks_left = rp->p_quantum_size; 	/* give new quantum */
+	  }
+	  unlock(5);
+      }
+  }
+#if DEBUG
+  kprintf("ticks_added: %d\n", ticks_added);
+#endif
+
+  /* Now schedule a new watchdog timer to balance the queues again.  The
+   * period depends on the total amount of quantum ticks added.
+   */
+  next_period = MAX(Q_BALANCE_TICKS, ticks_added);	/* calculate next */
+  set_timer(&queue_timer, get_uptime() + next_period, balance_queues);
+}
+
+/*===========================================================================*
+ *				lock_send				     *
+ *===========================================================================*/
+PUBLIC int lock_send(dst_e, m_ptr)
+int dst_e;			/* to whom is message being sent? */
+message *m_ptr;			/* pointer to message buffer */
+{
+/* Safe gateway to mini_send() for tasks: the kernel lock is held around
+ * the actual send so it cannot be interrupted halfway.  Returns whatever
+ * mini_send() returns.
+ */
+  register int r;
+
+  lock(2, "send");
+  r = mini_send(proc_ptr, dst_e, m_ptr, NON_BLOCKING);
+  unlock(2);
+  return r;
+}
+
+/*===========================================================================*
+ *				lock_enqueue				     *
+ *===========================================================================*/
+PUBLIC void lock_enqueue(rp)
+struct proc *rp;		/* this process is now runnable */
+{
+/* Safe gateway to enqueue() for tasks. */
+  lock(3, "enqueue");	/* hold the kernel lock while the queues change */
+  enqueue(rp);
+  unlock(3);
+}
+
+/*===========================================================================*
+ *				lock_dequeue				     *
+ *===========================================================================*/
+PUBLIC void lock_dequeue(rp)
+struct proc *rp;		/* this process is no longer runnable */
+{
+/* Safe gateway to dequeue() for tasks. */
+  if (k_reenter >= 0) {
+	/* We're in an exception or interrupt, so don't lock (and ...
+	 * don't unlock): interrupts are already off, and unlocking
+	 * here would re-enable them prematurely.
+	 */
+	dequeue(rp);
+  } else {
+	lock(4, "dequeue");
+	dequeue(rp);
+	unlock(4);
+  }
+}
+
+/*===========================================================================*
+ *				isokendpt_f				     *
+ *===========================================================================*/
+#if DEBUG_ENABLE_IPC_WARNINGS
+PUBLIC int isokendpt_f(file, line, e, p, fatalflag)
+char *file;
+int line;
+#else
+PUBLIC int isokendpt_f(e, p, fatalflag)
+#endif
+int e, *p, fatalflag;
+{
+	int ok = 0;
+	/* Convert an endpoint number into a process number.
+	 * Return nonzero if the process is alive with the corresponding
+	 * generation number, zero otherwise.
+	 *
+	 * This function is called with file and line number by the
+	 * isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
+	 * otherwise without. This allows us to print where the
+	 * conversion was attempted, making the errors verbose without
+	 * adding code for that at every call.
+	 *
+	 * If fatalflag is nonzero, we must panic if the conversion doesn't
+	 * succeed.
+	 */
+	*p = _ENDPOINT_P(e);
+	if(!isokprocn(*p)) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+		kprintf("kernel:%s:%d: bad endpoint %d: proc %d out of range\n",
+		file, line, e, *p);
+#endif
+	} else if(isemptyn(*p)) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+	kprintf("kernel:%s:%d: bad endpoint %d: proc %d empty\n", file, line, e, *p);
+#endif
+	} else if(proc_addr(*p)->p_endpoint != e) {
+#if DEBUG_ENABLE_IPC_WARNINGS
+		kprintf("kernel:%s:%d: bad endpoint %d: proc %d has ept %d (generation %d vs. %d)\n", file, line,
+		e, *p, proc_addr(*p)->p_endpoint,
+		_ENDPOINT_G(e), _ENDPOINT_G(proc_addr(*p)->p_endpoint));
+#endif
+	} else ok = 1;
+	if(!ok && fatalflag) {
+		panic("invalid endpoint ", e);
+	}
+	return ok;
+}
+
+
+/* Look for a runnable process owned by 'uid' on the ready-queue chain that
+ * starts at 'head'.  If no such process exists, the uid is removed from the
+ * list of active users as a side effect.  Returns the process found, or
+ * NIL_PROC when this user has nothing runnable on this queue.
+ */
+struct proc* search_proc(struct proc *head, int uid)
+{
+    struct proc *p;
+
+    /* Walk the chain until a process of this user turns up. */
+    for (p = head; p != NIL_PROC; p = p->p_nextready)
+    {
+        if (p->p_uid == uid)
+        {
+            return p;			/* found a process of 'uid' */
+        }
+    }
+
+    /* Nothing runnable for this user here: forget about the user. */
+    remove_from_list(uid);
+    return NIL_PROC;
+}
+
+/* Move 'node_to_swap' to the head of the ready-queue chain 'head'.  The
+ * process which is going to run is transferred to the head of the list;
+ * that serves to minimize the necessary changes in the rest of the code.
+ * The caller guarantees the node is on the chain.  Returns the new head,
+ * which is the node itself.
+ */
+struct proc* swap_head(struct proc *head, struct proc *node_to_swap)
+{
+    struct proc *prev;
+
+    /* Already in front: nothing to do. */
+    if (head == node_to_swap)
+        return head;
+
+    /* Locate the predecessor of the node; it must exist because the node
+     * is on the chain and is not the head.
+     */
+    for (prev = head; prev->p_nextready != node_to_swap; prev = prev->p_nextready)
+        ;
+
+    /* Unlink the node and push it in front of the old head. */
+    prev->p_nextready = node_to_swap->p_nextready;
+    node_to_swap->p_nextready = head;
+
+    return node_to_swap;
+}
+
+/* Find the process of user 'uid' in the queue starting at 'head' and, when
+ * it exists, move it to the front of that queue.  Returns the new queue
+ * head (the process to execute) or NIL_PROC when the user owns nothing
+ * runnable here.
+ */
+struct proc* search_swap(struct proc *head, int uid)
+{
+    struct proc *found = search_proc(head, uid);
+
+    /* Promote the process to the head only if one was found. */
+    return (found != NIL_PROC) ? swap_head(head, found) : NIL_PROC;
+}
+

File 2/usr/src/kernel/proc.h

+#ifndef PROC_H
+#define PROC_H
+
+/* Here is the declaration of the process table.  It contains all process
+ * data, including registers, flags, scheduling priority, memory map,
+ * accounting, message passing (IPC) information, and so on.
+ *
+ * Many assembly code routines reference fields in it.  The offsets to these
+ * fields are defined in the assembler include file sconst.h.  When changing
+ * struct proc, be sure to change sconst.h to match.
+ */
+#include <minix/com.h>
+#include "protect.h"
+#include "const.h"
+#include "priv.h"
+
+#define MAX_USERS 50	/* capacity of list_of_uid: max distinct active uids */
+#define EMPTY -40000	/* sentinel value, presumably marking an unused
+			 * list_of_uid slot -- NOTE(review): confirm */
+
+struct proc {
+  struct stackframe_s p_reg;	/* process' registers saved in stack frame */
+
+#if (CHIP == INTEL)
+  reg_t p_ldt_sel;		/* selector in gdt with ldt base and limit */
+  struct segdesc_s p_ldt[2+NR_REMOTE_SEGS]; /* CS, DS and remote segments */
+#endif
+
+#if (CHIP == M68000)
+/* M68000 specific registers and FPU details go here. */
+#endif
+
+  proc_nr_t p_nr;		/* number of this process (for fast access) */
+  struct priv *p_priv;		/* system privileges structure */
+  short p_rts_flags;		/* process is runnable only if zero */
+  short p_misc_flags;		/* flags that do suspend the process */
+
+  char p_priority;		/* current scheduling priority */
+  char p_max_priority;		/* maximum scheduling priority */
+  char p_ticks_left;		/* number of scheduling ticks left */
+  char p_quantum_size;		/* quantum size in ticks */
+
+  struct mem_map p_memmap[NR_LOCAL_SEGS];   /* memory map (T, D, S) */
+
+  clock_t p_user_time;		/* user time in ticks */
+  clock_t p_sys_time;		/* sys time in ticks */
+
+  struct proc *p_nextready;	/* pointer to next ready process */
+  struct proc *p_caller_q;	/* head of list of procs wishing to send */
+  struct proc *p_q_link;	/* link to next proc wishing to send */
+  message *p_messbuf;		/* pointer to passed message buffer */
+  int p_getfrom_e;		/* from whom does process want to receive? */
+  int p_sendto_e;		/* to whom does process want to send? */
+
+  sigset_t p_pending;		/* bit map for pending kernel signals */
+
+  char p_name[P_NAME_LEN];	/* name of the process, including \0 */
+
+  int p_endpoint;		/* endpoint number, generation-aware */
+
+  uid_t p_uid;			/* the realuid which is passed from the mproc */
+
+#if DEBUG_SCHED_CHECK
+  int p_ready, p_found;
+#endif
+};
+
+/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
+#define SLOT_FREE	0x01	/* process slot is free */
+#define NO_MAP		0x02	/* keeps unmapped forked child from running */
+#define SENDING		0x04	/* process blocked trying to send */
+#define RECEIVING	0x08	/* process blocked trying to receive */
+#define SIGNALED	0x10	/* set when new kernel signal arrives */
+#define SIG_PENDING	0x20	/* unready while signal being processed */
+#define P_STOP		0x40	/* set when process is being traced */
+#define NO_PRIV		0x80	/* keep forked system process from running */
+#define NO_PRIORITY    0x100	/* process has been stopped */
+#define NO_ENDPOINT    0x200	/* process cannot send or receive messages */
+
+/* Misc flags */
+#define REPLY_PENDING	0x01	/* reply to IPC_REQUEST is pending */
+#define MF_VM		0x08	/* process uses VM */
+
+/* Scheduling priorities for p_priority. Values must start at zero (highest
+ * priority) and increment.  Priorities of the processes in the boot image
+ * can be set in table.c. IDLE must have a queue for itself, to prevent low
+ * priority user processes to run round-robin with IDLE.
+ */
+#define NR_SCHED_QUEUES   16	/* MUST equal minimum priority + 1 */
+#define TASK_Q		   0	/* highest, used for kernel tasks */
+#define MAX_USER_Q  	   7    /* highest priority for user processes */
+#define USER_Q  	   7    /* default (should correspond to nice 0) */
+#define MIN_USER_Q	  14	/* minimum priority for user processes */
+#define IDLE_Q		  15    /* lowest, only IDLE process goes here */
+
+/* Magic process table addresses. */
+#define BEG_PROC_ADDR (&proc[0])
+#define BEG_USER_ADDR (&proc[NR_TASKS])
+#define END_PROC_ADDR (&proc[NR_TASKS + NR_PROCS])
+
+#define NIL_PROC          ((struct proc *) 0)
+#define NIL_SYS_PROC      ((struct proc *) 1)	/* distinct non-null sentinel */
+#define cproc_addr(n)     (&(proc + NR_TASKS)[(n)])
+#define proc_addr(n)      (pproc_addr + NR_TASKS)[(n)]
+#define proc_nr(p) 	  ((p)->p_nr)
+
+#define isokprocn(n)      ((unsigned) ((n) + NR_TASKS) < NR_PROCS + NR_TASKS)
+#define isemptyn(n)       isemptyp(proc_addr(n))
+#define isemptyp(p)       ((p)->p_rts_flags == SLOT_FREE)
+#define iskernelp(p)	  iskerneln((p)->p_nr)
+#define iskerneln(n)	  ((n) < 0)	/* kernel tasks use negative numbers */
+#define isuserp(p)        isusern((p)->p_nr)
+#define isusern(n)        ((n) >= 0)
+
+/* The process table and pointers to process table slots. The pointers allow
+ * faster access because now a process entry can be found by indexing the
+ * pproc_addr array, while accessing an element i requires a multiplication
+ * with sizeof(struct proc) to determine the address.
+ */
+EXTERN struct proc proc[NR_TASKS + NR_PROCS];	/* process table */
+EXTERN struct proc *pproc_addr[NR_TASKS + NR_PROCS];
+EXTERN struct proc *rdy_head[NR_SCHED_QUEUES]; /* ptrs to ready list headers */
+EXTERN struct proc *rdy_tail[NR_SCHED_QUEUES]; /* ptrs to ready list tails */
+/* the array which holds the users' id who have at least one active process */
+EXTERN int list_of_uid[MAX_USERS];
+/* the subscript which points at the next user who will execute a process
+ * that belongs to him */
+EXTERN int next_user;
+
+#endif /* PROC_H */

File 2/usr/src/kernel/system/do_fork.c

+/* The kernel call implemented in this file:
+ *   m_type:	SYS_FORK
+ *
+ * The parameters for this kernel call are:
+ *    m1_i1:	PR_SLOT	 (child's process table slot)	
+ *    m1_i2:	PR_ENDPT (parent, process that forked)	
+ */
+
+#include "../system.h"
+#include <signal.h>
+#if (CHIP == INTEL)
+#include "../protect.h"
+#endif
+
+#include <minix/endpoint.h>
+
+#if USE_FORK
+
+/*===========================================================================*
+ *				do_fork					     *
+ *===========================================================================*/
+PUBLIC int do_fork(m_ptr)
+register message *m_ptr;	/* pointer to request message */
+{
+/* Handle sys_fork().  PR_ENDPT has forked.  The child is PR_SLOT. */
+#if (CHIP == INTEL)
+  reg_t old_ldt_sel;
+#endif
+  register struct proc *rpc;		/* child process pointer */
+  struct proc *rpp;			/* parent process pointer */
+  int i, gen;
+  int p_proc;
+
+  if(!isokendpt(m_ptr->PR_ENDPT, &p_proc))
+	return EINVAL;
+  rpp = proc_addr(p_proc);
+  rpc = proc_addr(m_ptr->PR_SLOT);
+  if (isemptyp(rpp) || ! isemptyp(rpc)) return(EINVAL);
+
+  /* Copy parent 'proc' struct to child. And reinitialize some fields. */
+  gen = _ENDPOINT_G(rpc->p_endpoint);
+#if (CHIP == INTEL)
+  old_ldt_sel = rpc->p_ldt_sel;		/* backup local descriptors */
+  *rpc = *rpp;				/* copy 'proc' struct */
+  rpc->p_ldt_sel = old_ldt_sel;		/* restore descriptors */
+#else
+  *rpc = *rpp;				/* copy 'proc' struct */
+#endif
+  if(++gen >= _ENDPOINT_MAX_GENERATION)	/* increase generation */
+	gen = 1;			/* generation number wraparound */
+  rpc->p_nr = m_ptr->PR_SLOT;		/* this was obliterated by copy */
+  rpc->p_endpoint = _ENDPOINT(gen, rpc->p_nr);	/* new endpoint of slot */
+
+  /* We copy the uid of the process from the mproc structure to the proc
+   * structure. */
+  rpc->p_uid = (uid_t)m_ptr->m1_i3;
+
+  /* Only one in group should have SIGNALED, child doesn't inherit tracing. */
+  rpc->p_rts_flags |= NO_MAP;		/* inhibit process from running */
+  rpc->p_rts_flags &= ~(SIGNALED | SIG_PENDING | P_STOP);
+  sigemptyset(&rpc->p_pending);
+
+  rpc->p_reg.retreg = 0;	/* child sees pid = 0 to know it is child */
+  rpc->p_user_time = 0;		/* set all the accounting times to 0 */
+  rpc->p_sys_time = 0;
+
+  /* Parent and child have to share the quantum that the forked process had,
+   * so that queued processes do not have to wait longer because of the fork.
+   * If the time left is odd, the child gets an extra tick.
+   */
+  rpc->p_ticks_left = (rpc->p_ticks_left + 1) / 2;
+  rpp->p_ticks_left =  rpp->p_ticks_left / 2;	
+
+  /* If the parent is a privileged process, take away the privileges from the 
+   * child process and inhibit it from running by setting the NO_PRIV flag.
+   * The caller should explicitely set the new privileges before executing.
+   */