@@ -1164,6 +1164,7 @@ struct sched_entity {
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq		*my_q;
 #endif
+
 /*
  * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
  * removed when useful for applications beyond shares distribution (e.g.
@@ -1191,6 +1192,7 @@ struct sched_rt_entity {
 #endif
 };
 
+
 struct rcu_node;
 
 enum perf_event_task_context {
@@ -1596,37 +1598,6 @@ static inline void set_numabalancing_state(bool enabled)
 }
 #endif
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
-
-static inline int rt_prio(int prio)
-{
-	if (unlikely(prio < MAX_RT_PRIO))
-		return 1;
-	return 0;
-}
-
-static inline int rt_task(struct task_struct *p)
-{
-	return rt_prio(p->prio);
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
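The comment block removed in this hunk documents the scheduler's inverted priority scale: valid RT priorities occupy [0, MAX_RT_PRIO) and the 40 SCHED_NORMAL/SCHED_BATCH nice levels sit in [MAX_RT_PRIO, MAX_PRIO), with a lower p->prio meaning a higher priority. The arithmetic can be checked in isolation; in the sketch below, the task_struct stub, the unlikely() stub, and the main() harness are illustrative additions rather than kernel code — only the macros and rt_prio() are taken from the hunk above.

#include <assert.h>

/* Illustrative stand-ins only; not kernel code. */
#define unlikely(x) (x)
struct task_struct { int prio; };

/* Definitions as removed from sched.h above. */
#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO
#define MAX_PRIO		(MAX_RT_PRIO + 40)	/* 140 */
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)	/* 120 == nice 0 */

static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

int main(void)
{
	struct task_struct fifo  = { .prio = 2 };		/* an RT task */
	struct task_struct nice0 = { .prio = DEFAULT_PRIO };	/* nice 0 */

	assert(rt_prio(fifo.prio));	/* 2 < 100: in the RT range */
	assert(!rt_prio(nice0.prio));	/* 120 >= 100: in the normal range */
	assert(MAX_PRIO - 1 == 139);	/* lowest priority, i.e. nice 19 */
	return 0;
}

Nothing in the hunks shown here adds these definitions back, so they presumably move to another header; the sketch only restates what they computed.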
@@ -2054,26 +2025,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return tsk->pi_blocked_on != NULL;
-}
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-	return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)		do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return false;
-}
-#endif
-
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
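The CONFIG_RT_MUTEXES block removed in this hunk is sched.h's view of priority inheritance: rt_mutex_getprio() reports a task's effective (possibly boosted) priority, rt_mutex_setprio() and rt_mutex_adjust_pi() apply and re-evaluate boosting, and tsk_is_pi_blocked() tests whether a task is blocked on an rt_mutex. A self-contained sketch of those semantics follows; the pared-down task_struct and the main() harness are hypothetical, and rt_mutex_getprio() here uses the !CONFIG_RT_MUTEXES fallback (return normal_prio), since the real boosted value is computed elsewhere in the rtmutex code.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct rt_mutex_waiter;				/* opaque here */

/* Hypothetical stub: only the fields these helpers touch. */
struct task_struct {
	int normal_prio;			/* priority without PI boosting */
	struct rt_mutex_waiter *pi_blocked_on;	/* waiter we sleep on, if any */
};

/* Fallback semantics from the #else branch above: with no rt_mutex
 * support there is no boosting, so the effective priority is just
 * the normal priority. */
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}

/* PI-enabled definition, reproduced as-is: a task counts as
 * PI-blocked exactly when its pi_blocked_on pointer is non-NULL. */
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return tsk->pi_blocked_on != NULL;
}

int main(void)
{
	struct task_struct t = { .normal_prio = 120, .pi_blocked_on = NULL };

	assert(!tsk_is_pi_blocked(&t));
	assert(rt_mutex_getprio(&t) == 120);
	return 0;
}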
@@ -2703,8 +2654,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern void normalize_rt_tasks(void);
-
 #ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group root_task_group;