Commit 51bf2bc1 authored by Rohit Gupta

fix for rejects after patch

parent bd9c6f7f
@@ -122,6 +122,7 @@ static int exynos_power_up_cpu(unsigned int phys_cpu)
         if (timeout == 0) {
             printk(KERN_ERR "cpu%d power up failed\n", phys_cpu);
+            raw_spin_unlock(&boot_lock);
             return -ETIMEDOUT;
         }
     }
...
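For context: the added raw_spin_unlock() closes a lock leak on the error path. The unlock implies boot_lock is held while the power-up timeout is checked (taken either earlier in exynos_power_up_cpu() or by its caller on the secondary-CPU boot path), and returning -ETIMEDOUT without dropping it would leave the next CPU bring-up spinning on the lock forever. A minimal sketch of the invariant, using only names visible in the hunk above:

/*
 * Sketch only: whatever takes boot_lock around the power-up wait must
 * release it on every exit, including the failure path.
 */
    raw_spin_lock(&boot_lock);          /* assumed to happen before the wait */
    /* ... poll the CPU power state until it comes up or the timeout hits ... */
    if (timeout == 0) {
        printk(KERN_ERR "cpu%d power up failed\n", phys_cpu);
        raw_spin_unlock(&boot_lock);    /* the line this commit adds */
        return -ETIMEDOUT;
    }
    /* success path continues and drops the lock later */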
@@ -277,7 +277,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
      * If we're in an interrupt, or have no irqs, or have no user
      * context, we must not take the fault..
      */
-    if (in_atomic() || irqs_disabled() || !mm)
+    //RGu if (in_atomic() || irqs_disabled() || !mm)
+    if (!mm || pagefault_disabled() || irqs_disabled() )
         goto no_context;

     if (user_mode(regs))
...
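For context on the do_page_fault() change: the -rt series drops the in_atomic() test because on PREEMPT_RT the kmap_atomic()/pagefault_disable() regions are preemptible, so the preempt count no longer says whether a fault may be handled; an explicit per-task counter is consulted through pagefault_disabled() instead. A rough sketch of that machinery, modelled on the -rt patches and later mainline uaccess code (the field name and placement are assumptions as far as this diff goes):

/* Sketch of the per-task pagefault-disable accounting relied on above. */
static inline void pagefault_disable(void)
{
    current->pagefault_disabled++;
    /*
     * Order the counter bump before any access that could fault.
     */
    barrier();
}

static inline void pagefault_enable(void)
{
    barrier();
    current->pagefault_disabled--;
}

static inline bool pagefault_disabled(void)
{
    return current->pagefault_disabled != 0;
}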
@@ -54,5 +54,6 @@ obj-$(CONFIG_INTEL_MEI) += mei/
 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
 obj-$(CONFIG_SRAM) += sram.o
+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
 obj-$(CONFIG_SCHED_HMP) += cci400.o
 obj-$(CONFIG_MIPI_LLI) += mipi-lli/
@@ -176,6 +176,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
+#endif /* !PREEMPT_RT_FULL */

 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

 #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
...
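The lone #endif /* !PREEMPT_RT_FULL */ added here closes an #ifndef CONFIG_PREEMPT_RT_FULL block that the RT patch opens earlier in the header: on PREEMPT_RT_FULL kernels the ordinary mutex declarations are compiled out and the mutex_* API is mapped onto rt_mutex-based substitutes. Roughly, the header ends up shaped as below; the <linux/mutex_rt.h> include follows the -rt series and is an assumption as far as this hunk is concerned:

/* Sketch of include/linux/mutex.h after the RT patch is applied. */
#ifdef CONFIG_PREEMPT_RT_FULL
# include <linux/mutex_rt.h>    /* mutex_* become rt_mutex wrappers */
#else
/* ... the ordinary struct mutex definition and declarations ... */
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
#endif /* !PREEMPT_RT_FULL */

/* Shared by both configurations: */
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);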
@@ -246,6 +246,9 @@ int task_free_unregister(struct notifier_block *n)
 }
 EXPORT_SYMBOL(task_free_unregister);

+#ifdef CONFIG_PREEMPT_RT_BASE
+static
+#endif
 void __put_task_struct(struct task_struct *tsk)
 {
     WARN_ON(!tsk->exit_state);
...
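Making __put_task_struct() static only under CONFIG_PREEMPT_RT_BASE matches the -rt "delayed put_task_struct" change: on RT the final reference can be dropped from contexts where freeing the task structure (which may take sleeping locks) is not allowed, so put_task_struct() defers the free through an RCU callback instead of calling __put_task_struct() directly. The counterpart lives in sched.h and elsewhere in fork.c and is not part of this hunk; a sketch of how the -rt series typically wires it up, where the put_rcu field and the __put_task_struct_cb() name are assumptions taken from the RT patch:

/* include/linux/sched.h side, sketched: */
#ifdef CONFIG_PREEMPT_RT_BASE
extern void __put_task_struct_cb(struct rcu_head *rhp);

static inline void put_task_struct(struct task_struct *t)
{
    if (atomic_dec_and_test(&t->usage))
        call_rcu(&t->put_rcu, __put_task_struct_cb);
}
#else
extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
    if (atomic_dec_and_test(&t->usage))
        __put_task_struct(t);
}
#endif

/* kernel/fork.c side, sketched: the RCU callback funnels back into the
 * now-static __put_task_struct(). */
#ifdef CONFIG_PREEMPT_RT_BASE
void __put_task_struct_cb(struct rcu_head *rhp)
{
    struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);

    __put_task_struct(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct_cb);
#endif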
@@ -47,12 +47,14 @@
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
 #include <linux/timer.h>
+#include <linux/kthread.h>
 #include <linux/freezer.h>

 #include <asm/uaccess.h>
 #include <mach/exynos-ss.h>

 #include <trace/events/timer.h>
+#include <trace/events/hist.h>

 /*
  * The timer bases:
...
@@ -4081,13 +4081,22 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 extern struct cpumask hmp_slow_cpu_mask;

-/* Actually do priority change: must hold rq lock. */
-static void
-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+//RGu static void
+//RGu __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+static void __setscheduler_params(struct task_struct *p, int policy, int prio)
 {
     p->policy = policy;
     p->rt_priority = prio;
     p->normal_prio = normal_prio(p);
+    set_load_weight(p);
+}
+
+/* Actually do priority change: must hold rq lock. */
+static void
+__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+{
+    __setscheduler_params(p, policy, prio);
     /* we are holding p->pi_lock already */
     p->prio = rt_mutex_getprio(p);
     if (rt_prio(p->prio)) {
@@ -4099,7 +4108,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
     }
     else
         p->sched_class = &fair_sched_class;
-    set_load_weight(p);
+    //RGu set_load_weight(p);
 }

 /*
@@ -7506,7 +7515,9 @@ void __might_sleep(const char *file, int line, int preempt_offset)
     static unsigned long prev_jiffy; /* ratelimiting */

     rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
-    if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+    //RGu if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+    if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+         !is_idle_task(current)) ||
         oops_in_progress)
         return;
     if (system_state != SYSTEM_RUNNING &&
...
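The kernel/sched/core.c hunks split the old __setscheduler() in two: __setscheduler_params() now does the plain bookkeeping (policy, rt_priority, normal_prio, load weight), while __setscheduler() layers the effective priority and sched_class choice on top via rt_mutex_getprio(), so priority-inheritance boosting is respected. The point of the split is to let the setscheduler path update a task's requested parameters without clobbering a PI-boosted prio. The __might_sleep() hunk is independent: adding !is_idle_task(current) makes blocking from the idle task always be reported, even when the preempt count looks fine. A sketch of how a caller can use the two halves; the real call sites in __sched_setscheduler() are not shown in this diff, so treat the surrounding logic as an assumption:

/*
 * Illustrative only, not the actual __sched_setscheduler() from this tree:
 * how the split lets a priority change respect an active PI boost.
 */
static void apply_policy_keep_boost(struct rq *rq, struct task_struct *p,
                                    int policy, int prio, bool pi_boosted)
{
    if (pi_boosted) {
        /*
         * Record the user-requested parameters only; p->prio and
         * p->sched_class keep the boosted values until the rtmutex
         * chain drops the boost and re-runs __setscheduler().
         */
        __setscheduler_params(p, policy, prio);
    } else {
        /* No boost in the way: apply parameters, prio and class together. */
        __setscheduler(rq, p, policy, prio);
    }
}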
@@ -416,14 +416,16 @@ asmlinkage void __do_softirq(void)
     __local_bh_disable((unsigned long)__builtin_return_address(0),
                 SOFTIRQ_OFFSET);
-    lockdep_softirq_enter();
+    //RGu lockdep_softirq_enter();
+    lockdep_softirq_start();

     cpu = smp_processor_id();
 restart:
     /* Reset the pending bitmask before enabling irqs */
     set_softirq_pending(0);

-    local_irq_enable();
+    /* RGu
+    local_irq_enable();

     h = softirq_vec;
@@ -455,6 +457,8 @@ restart:
     } while (pending);

     local_irq_disable();
+    RGu */
+    handle_pending_softirqs(pending, cpu, 1);

     pending = local_softirq_pending();
     if (pending) {
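In __do_softirq() the open-coded "enable irqs, walk softirq_vec, run handlers" loop is commented out and replaced by handle_pending_softirqs(pending, cpu, 1), the helper the -rt softirq rework calls from both the hard-irq and the ksoftirqd paths. Its exact body in this tree is not shown, so the following is a sketch following the 3.x -rt patches:

/* Sketch of the -rt helper, roughly following the 3.x -rt softirq.c layout. */
static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
{
    struct softirq_action *h = softirq_vec;

    local_irq_enable();
    for (; pending; h++, pending >>= 1) {
        unsigned int vec_nr = h - softirq_vec;

        if (!(pending & 1))
            continue;

        kstat_incr_softirqs_this_cpu(vec_nr);
        trace_softirq_entry(vec_nr);
        h->action(h);               /* run the handler with irqs enabled */
        trace_softirq_exit(vec_nr);
    }
    if (need_rcu_bh_qs)
        rcu_bh_qs(cpu);
    local_irq_disable();
}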
@@ -911,16 +915,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 EXPORT_SYMBOL(__tasklet_hi_schedule);

-void __tasklet_hi_schedule_first(struct tasklet_struct *t)
-{
-    BUG_ON(!irqs_disabled());
-
-    t->next = __this_cpu_read(tasklet_hi_vec.head);
-    __this_cpu_write(tasklet_hi_vec.head, t);
-    __raise_softirq_irqoff(HI_SOFTIRQ);
-}
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+/*RGu
 static void tasklet_action(struct softirq_action *a)
 {
@@ -960,6 +955,128 @@ static void tasklet_action(struct softirq_action *a)
         local_irq_enable();
     }
 }
+*/
+
+void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+    __tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void tasklet_enable(struct tasklet_struct *t)
+{
+    if (!atomic_dec_and_test(&t->count))
+        return;
+    if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+        tasklet_schedule(t);
+}
+EXPORT_SYMBOL(tasklet_enable);
+
+void tasklet_hi_enable(struct tasklet_struct *t)
+{
+    if (!atomic_dec_and_test(&t->count))
+        return;
+    if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+        tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(tasklet_hi_enable);
+
+static void
+__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
+{
+    int loops = 1000000;
+
+    while (list) {
+        struct tasklet_struct *t = list;
+
+        list = list->next;
+
+        /*
+         * Should always succeed - after a tasklist got on the
+         * list (after getting the SCHED bit set from 0 to 1),
+         * nothing but the tasklet softirq it got queued to can
+         * lock it:
+         */
+        if (!tasklet_trylock(t)) {
+            WARN_ON(1);
+            continue;
+        }
+
+        t->next = NULL;
+
+        /*
+         * If we cannot handle the tasklet because it's disabled,
+         * mark it as pending. tasklet_enable() will later
+         * re-schedule the tasklet.
+         */
+        if (unlikely(atomic_read(&t->count))) {
+out_disabled:
+            /* implicit unlock: */
+            wmb();
+            t->state = TASKLET_STATEF_PENDING;
+            continue;
+        }
+
+        /*
+         * After this point on the tasklet might be rescheduled
+         * on another CPU, but it can only be added to another
+         * CPU's tasklet list if we unlock the tasklet (which we
+         * dont do yet).
+         */
+        if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+            WARN_ON(1);
+
+again:
+        t->func(t->data);
+
+        /*
+         * Try to unlock the tasklet. We must use cmpxchg, because
+         * another CPU might have scheduled or disabled the tasklet.
+         * We only allow the STATE_RUN -> 0 transition here.
+         */
+        while (!tasklet_tryunlock(t)) {
+            /*
+             * If it got disabled meanwhile, bail out:
+             */
+            if (atomic_read(&t->count))
+                goto out_disabled;
+            /*
+             * If it got scheduled meanwhile, re-execute
+             * the tasklet function:
+             */
+            if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+                goto again;
+            if (!--loops) {
+                printk("hm, tasklet state: %08lx\n", t->state);
+                WARN_ON(1);
+                tasklet_unlock(t);
+                break;
+            }
+        }
+    }
+}
+
+static void tasklet_action(struct softirq_action *a)
+{
+    struct tasklet_struct *list;
+
+    local_irq_disable();
+    list = __get_cpu_var(tasklet_vec).head;
+    __get_cpu_var(tasklet_vec).head = NULL;
+    __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+    local_irq_enable();
+
+    __tasklet_action(a, list);
+}
+
 static void tasklet_hi_action(struct softirq_action *a)
 {
@@ -970,7 +1087,7 @@ static void tasklet_hi_action(struct softirq_action *a)
     __this_cpu_write(tasklet_hi_vec.head, NULL);
     __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
     local_irq_enable();
-
+    /* RGu
     while (list) {
         struct tasklet_struct *t = list;
@@ -998,6 +1115,8 @@ static void tasklet_hi_action(struct softirq_action *a)
         __raise_softirq_irqoff(HI_SOFTIRQ);
         local_irq_enable();
     }
+    RGu */
+    __tasklet_action(a, list);
 }
...
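The new __tasklet_action() leans on three small state-bit primitives from <linux/interrupt.h>: tasklet_trylock(), tasklet_unlock() and, added by the -rt series, tasklet_tryunlock(), which is the cmpxchg mentioned in the comment above (only the RUN -> 0 transition is allowed, so a concurrent schedule or disable makes it fail and the loop re-examines the state). A sketch of what they look like; the tryunlock variant and the TASKLET_STATEF_* masks come from the RT patch and are assumptions with respect to this diff:

/* Sketch of the locking primitives __tasklet_action() relies on (SMP case). */
#define TASKLET_STATEF_SCHED    (1UL << TASKLET_STATE_SCHED)
#define TASKLET_STATEF_RUN      (1UL << TASKLET_STATE_RUN)
#define TASKLET_STATEF_PENDING  (1UL << TASKLET_STATE_PENDING)

static inline int tasklet_trylock(struct tasklet_struct *t)
{
    /* Claim the RUN bit; fails if another CPU is already running it. */
    return !test_and_set_bit(TASKLET_STATE_RUN, &t->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
    smp_mb();
    clear_bit(TASKLET_STATE_RUN, &t->state);
}

static inline int tasklet_tryunlock(struct tasklet_struct *t)
{
    /*
     * Succeed only for the pure RUN -> 0 transition: if SCHED or
     * PENDING was set meanwhile, the caller must deal with that first.
     */
    return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
}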