Linux Kernel User-space Process - Demo struct task_struct struct mm_struct IPC

Linux Kernel Source:
struct task_struct data-structure
struct mm_struct data-structure
Task state bitmask flags – tsk->state

Here is the struct mm_struct data-structure (include/linux/mm_types.h) from kernel source version 4.14 for quick reference (config-dependent #ifdef CONFIG_* blocks are omitted for brevity):

struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;                   /* per-thread vmacache */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
	/* Base addresses for compatible mmap() */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;

	/**
	 * @mm_users: The number of users including userspace.
	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
	 * to 0 (i.e. when the task exits and there are no other temporary
	 * reference holders), we also release a reference on @mm_count
	 * (which may then free the &struct mm_struct if @mm_count also
	 * drops to 0).
	 */
	atomic_t mm_users;

	/**
	 * @mm_count: The number of references to &struct mm_struct
	 * (@mm_users count as 1).
	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
	 * &struct mm_struct is freed.
	 */
	atomic_t mm_count;

	atomic_long_t nr_ptes;			/* PTE page table pages */
	atomic_long_t nr_pmds;			/* PMD page table pages */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
	atomic_t membarrier_state;
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	struct task_struct __rcu *owner;
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
	struct mmu_notifier_mm *mmu_notifier_mm;
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
	struct cpumask cpumask_allocation;
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	atomic_t tlb_flush_pending;
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;
	struct uprobes_state uprobes_state;
	atomic_long_t hugetlb_usage;
	struct work_struct async_put_work;

	/* HMM needs to track a few things per mm */
	struct hmm *hmm;
} __randomize_layout;
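
The first fields above (mmap and mm_rb) hold the per-process linked list and red-black tree of VMAs, while mm_users and mm_count implement the two-level reference counting described in the field comments. The sketch below is a minimal, hypothetical 4.14-era kernel module that walks the current task's VMA list under mmap_sem; the module name vma_dump and the printed output format are illustrative, not taken from the kernel tree:

/* vma_dump.c - hypothetical demo: walk the current task's VMA list (4.14 API) */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>

static int __init vma_dump_init(void)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	/* get_task_mm() takes a reference on mm_users; paired with mmput() */
	mm = get_task_mm(current);
	if (!mm)
		return 0;	/* kernel threads have no user address space */

	down_read(&mm->mmap_sem);	/* mmap_sem protects the VMA list */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		pr_info("vma %lx-%lx flags=%lx\n",
			vma->vm_start, vma->vm_end, vma->vm_flags);
	pr_info("map_count=%d total_vm=%lu pages\n",
		mm->map_count, mm->total_vm);
	up_read(&mm->mmap_sem);

	mmput(mm);			/* drop the mm_users reference */
	return 0;
}

static void __exit vma_dump_exit(void)
{
}

module_init(vma_dump_init);
module_exit(vma_dump_exit);
MODULE_LICENSE("GPL");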
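
Several of these fields are also visible from user space: the kernel renders the VMA list (mm->mmap) as /proc/<pid>/maps, and the heap boundaries start_brk/brk back the brk(2)/sbrk(2) interface. A small user-space companion demo, assuming only standard glibc calls:

/* mm_demo.c - print this process's VMAs and current program break */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps) {
		perror("fopen /proc/self/maps");
		return EXIT_FAILURE;
	}

	/* Each line is one VMA: the kernel walks mm->mmap to produce this. */
	while (fgets(line, sizeof(line), maps))
		fputs(line, stdout);
	fclose(maps);

	/* sbrk(0) returns the current program break, i.e. mm->brk. */
	printf("current program break (mm->brk): %p\n", sbrk(0));
	return EXIT_SUCCESS;
}

Compile with gcc mm_demo.c -o mm_demo and run it: the [heap] and [stack] lines in the maps output correspond to the regions whose bounds mm_struct tracks in start_brk/brk and start_stack.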