/*
 * include/linux/sched/sysctl.h
 *
 * Declarations of scheduler, hung-task-detector and memory-map
 * sysctl tunables, plus their /proc handler prototypes.
 */
1#ifndef _SCHED_SYSCTL_H
2#define _SCHED_SYSCTL_H
3
4#ifdef CONFIG_DETECT_HUNG_TASK
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
9extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
10 void __user *buffer,
11 size_t *lenp, loff_t *ppos);
12#else
13/* Avoid need for ifdefs elsewhere in the code */
14enum { sysctl_hung_task_timeout_secs = 0 };
15#endif
16
17/*
18 * Default maximum number of active map areas, this limits the number of vmas
19 * per mm struct. Users can overwrite this number by sysctl but there is a
20 * problem.
21 *
22 * When a program's coredump is generated as ELF format, a section is created
23 * per a vma. In ELF, the number of sections is represented in unsigned short.
24 * This means the number of sections should be smaller than 65535 at coredump.
25 * Because the kernel adds some informative sections to a image of program at
26 * generating coredump, we need some margin. The number of extra sections is
27 * 1-3 now and depends on arch. We use "5" as safe margin, here.
28 */
29#define MAPCOUNT_ELF_CORE_MARGIN (5)
30#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
31
32extern int sysctl_max_map_count;
33
34extern unsigned int sysctl_sched_latency;
35extern unsigned int sysctl_sched_min_granularity;
36extern unsigned int sysctl_sched_wakeup_granularity;
37extern unsigned int sysctl_sched_child_runs_first;
38
39enum sched_tunable_scaling {
40 SCHED_TUNABLESCALING_NONE,
41 SCHED_TUNABLESCALING_LOG,
42 SCHED_TUNABLESCALING_LINEAR,
43 SCHED_TUNABLESCALING_END,
44};
45extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
46
47extern unsigned int sysctl_numa_balancing_scan_delay;
48extern unsigned int sysctl_numa_balancing_scan_period_min;
49extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;

52#ifdef CONFIG_SCHED_DEBUG
53extern unsigned int sysctl_sched_migration_cost;
54extern unsigned int sysctl_sched_nr_migrate;
55extern unsigned int sysctl_sched_time_avg;
56extern unsigned int sysctl_timer_migration;
57extern unsigned int sysctl_sched_shares_window;
58
59int sched_proc_update_handler(struct ctl_table *table, int write,
60 void __user *buffer, size_t *length,
61 loff_t *ppos);
62#endif
/*
 * Timer-migration knob accessor: reports the sysctl value when the
 * scheduler debug interface is built in, and the compiled-in default
 * (enabled, i.e. 1) otherwise.
 */
static inline unsigned int get_sysctl_timer_migration(void)
{
#ifdef CONFIG_SCHED_DEBUG
	return sysctl_timer_migration;
#else
	return 1;
#endif
}
75/*
76 * control realtime throttling:
77 *
78 * /proc/sys/kernel/sched_rt_period_us
79 * /proc/sys/kernel/sched_rt_runtime_us
80 */
extern unsigned int sysctl_sched_rt_period;
82extern int sysctl_sched_rt_runtime;
83
84#ifdef CONFIG_CFS_BANDWIDTH
85extern unsigned int sysctl_sched_cfs_bandwidth_slice;
86#endif
87
88#ifdef CONFIG_SCHED_AUTOGROUP
89extern unsigned int sysctl_sched_autogroup_enabled;
90#endif
91
extern int sched_rr_timeslice;
93
94extern int sched_rr_handler(struct ctl_table *table, int write,
95 void __user *buffer, size_t *lenp,
96 loff_t *ppos);
97
extern int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
101
102#endif /* _SCHED_SYSCTL_H */