Commit 28f6e08d authored by Rohit Gupta

apply patch -3.10.93-rt101

parent 8475a30f
......@@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
On other - If you know of the key combos for other architectures, please
let me know so I can add them to this section.
On all - write a character to /proc/sysrq-trigger. e.g.:
On all - write a character to /proc/sysrq-trigger, e.g.:
echo t > /proc/sysrq-trigger
On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
Send an ICMP echo request with this pattern plus the particular
SysRq command key. Example:
# ping -c1 -s57 -p0102030468
will trigger the SysRq-H (help) command.
* What are the 'command' keys?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'b' - Will immediately reboot the system without syncing or unmounting
......
......@@ -6,6 +6,7 @@ config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
depends on HAVE_OPROFILE
depends on !PREEMPT_RT_FULL
select RING_BUFFER
select RING_BUFFER_ALLOW_SWAP
help
......
......@@ -3,3 +3,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
generic-y += irq_work.h
......@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If we're in an interrupt context, or have no user context,
we must not take the fault. */
if (!mm || in_atomic())
if (!mm || pagefault_disabled())
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
......
......@@ -16,6 +16,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
......
......@@ -20,6 +20,7 @@ config ARM
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HARDIRQS_SW_RESEND
select IRQ_FORCED_THREADING
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
......@@ -48,6 +49,7 @@ config ARM
select HAVE_MEMBLOCK
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
select HAVE_PREEMPT_LAZY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
......
......@@ -10,6 +10,7 @@ generic-y += exec.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
......
......@@ -127,6 +127,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#else /* min ARCH >= ARMv6 */
#define __HAVE_ARCH_CMPXCHG 1
extern void __bad_cmpxchg(volatile void *ptr, int size);
/*
......
......@@ -90,6 +90,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
preempt_disable_rt();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
......@@ -101,6 +103,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "cc", "memory");
*uval = val;
preempt_enable_rt();
return ret;
}
......
......@@ -3,6 +3,14 @@
#include <linux/thread_info.h>
#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
#else
static inline void
switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
#endif
/*
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'. schedule() itself
......@@ -12,6 +20,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
switch_kmaps(prev, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
......
......@@ -50,6 +50,7 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
......@@ -148,6 +149,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_NEED_RESCHED_LAZY 3
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
......@@ -160,6 +162,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
......
......@@ -53,6 +53,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
......
......@@ -205,11 +205,18 @@ __irq_svc:
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
bne 1f @ return from exeption
ldr r0, [tsk, #TI_FLAGS] @ get flags
tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
blne svc_preempt @ preempt!
ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
teq r8, #0 @ if preempt lazy count != 0
movne r0, #0 @ force flags to 0
tst r0, #_TIF_NEED_RESCHED
tst r0, #_TIF_NEED_RESCHED_LAZY
blne svc_preempt
1:
#endif
svc_exit r5, irq = 1 @ return from exception
......@@ -224,6 +231,8 @@ svc_preempt:
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
bne 1b
tst r0, #_TIF_NEED_RESCHED_LAZY
moveq pc, r8 @ go again
b 1b
#endif
......
......@@ -118,7 +118,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
continue;
}
err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
err = request_irq(irq, handler,
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
cpu_pmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
......
......@@ -596,6 +596,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
/*
* CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
* initialized by pgtable_page_ctor() then a coredump of the vector page will
* fail.
*/
static int __init vectors_user_mapping_init_page(void)
{
struct page *page;
unsigned long addr = 0xffff0000;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset_k(addr);
pud = pud_offset(pgd, addr);
pmd = pmd_offset(pud, addr);
page = pmd_page(*(pmd));
pgtable_page_ctor(page);
return 0;
}
late_initcall(vectors_user_mapping_init_page);
#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
......
......@@ -584,7 +584,8 @@ asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
if (likely(thread_flags & (_TIF_NEED_RESCHED |
_TIF_NEED_RESCHED_LAZY))) {
schedule();
} else {
if (unlikely(!user_mode(regs)))
......
......@@ -87,7 +87,7 @@ extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
static DEFINE_SPINLOCK(unwind_lock);
static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
......@@ -195,7 +195,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
/* module unwind tables */
struct unwind_table *table;
spin_lock_irqsave(&unwind_lock, flags);
raw_spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
......@@ -207,7 +207,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
break;
}
}
spin_unlock_irqrestore(&unwind_lock, flags);
raw_spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
......@@ -469,9 +469,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
spin_lock_irqsave(&unwind_lock, flags);
raw_spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
raw_spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
......@@ -483,9 +483,9 @@ void unwind_table_del(struct unwind_table *tab)
if (!tab)
return;
spin_lock_irqsave(&unwind_lock, flags);
raw_spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
spin_unlock_irqrestore(&unwind_lock, flags);
raw_spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
......@@ -487,9 +487,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
wait_event_interruptible(*wq, !vcpu->arch.pause);
swait_event_interruptible(*wq, !vcpu->arch.pause);
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
......
......@@ -35,7 +35,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu;
wait_queue_head_t *wq;
struct swait_head *wq;
unsigned long cpu_id;
phys_addr_t target_pc;
......@@ -66,7 +66,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
vcpu->arch.pause = false;
smp_mb(); /* Make sure the above is visible */
wake_up_interruptible(wq);
swait_wake_interruptible(wq);
return KVM_PSCI_RET_SUCCESS;
}
......
......@@ -134,6 +134,7 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
......
......@@ -77,7 +77,7 @@ static struct clocksource pit_clk = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct irqaction at91sam926x_pit_irq;
/*
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
*/
......@@ -86,6 +86,8 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/* Set up irq handler */
setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
/* update clocksource counter */
pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
......@@ -98,6 +100,7 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
case CLOCK_EVT_MODE_UNUSED:
/* disable irq, leaving the clocksource active */
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
break;
case CLOCK_EVT_MODE_RESUME:
break;
......
......@@ -80,7 +80,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
static DEFINE_SPINLOCK(boot_lock);
static DEFINE_RAW_SPINLOCK(boot_lock);
static void __cpuinit exynos_secondary_init(unsigned int cpu)
{
......@@ -95,8 +95,8 @@ static void __cpuinit exynos_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
raw_spin_lock(&boot_lock);
raw_spin_unlock(&boot_lock);
}
static int exynos_power_up_cpu(unsigned int phys_cpu)
......@@ -162,7 +162,7 @@ static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct
* Set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
......@@ -218,7 +218,7 @@ static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
......
......@@ -30,7 +30,7 @@
extern void msm_secondary_startup(void);
static DEFINE_SPINLOCK(boot_lock);
static DEFINE_RAW_SPINLOCK(boot_lock);
static inline int get_core_count(void)
{
......@@ -50,8 +50,8 @@ static void __cpuinit msm_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
raw_spin_lock(&boot_lock);
raw_spin_unlock(&boot_lock);
}
static __cpuinit void prepare_cold_cpu(unsigned int cpu)
......@@ -88,7 +88,7 @@ static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *id
* set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
......@@ -122,7 +122,7 @@ static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *id
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
......
......@@ -44,7 +44,7 @@ u16 pm44xx_errata;
/* SCU base address */
static void __iomem *scu_base;
static DEFINE_SPINLOCK(boot_lock);
static DEFINE_RAW_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
......@@ -68,8 +68,8 @@ static void __cpuinit omap4_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
raw_spin_lock(&boot_lock);
raw_spin_unlock(&boot_lock);
}
static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
......@@ -83,7 +83,7 @@ static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *
* Set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
raw_spin_lock(&boot_lock);
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
......@@ -160,7 +160,7 @@ static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
raw_spin_unlock(&boot_lock);
return 0;
}
......
......@@ -23,7 +23,7 @@
static void __iomem *scu_base;
static void __iomem *rsc_base;
static DEFINE_SPINLOCK(boot_lock);
static DEFINE_RAW_SPINLOCK(boot_lock);
static struct map_desc scu_io_desc __initdata = {
.length = SZ_4K,
......@@ -56,8 +56,8 @@ static void __cpuinit sirfsoc_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
raw_spin_lock(&boot_lock);
raw_spin_unlock(&boot_lock);
}
static struct of_device_id rsc_ids[] = {
......@@ -95,7 +95,7 @@ static int __cpuinit sirfsoc_boot_secondary(unsigned int cpu, struct task_struct
/* make sure write buffer is drained */
mb();
spin_lock(&boot_lock);
raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
......@@ -128,7 +128,7 @@ static int __cpuinit sirfsoc_boot_secondary(unsigned int cpu, struct task_struct
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
......
......@@ -20,7 +20,7 @@
#include <mach/spear.h>
#include "generic.h"
static DEFINE_SPINLOCK(boot_lock);
static DEFINE_RAW_SPINLOCK(boot_lock);
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
......@@ -36,8 +36,8 @@ static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
raw_spin_lock(&boot_lock);
raw_spin_unlock(&boot_lock);
}
static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
......@@ -48,7 +48,7 @@ static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_stru
* set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
......@@ -75,7 +75,7 @@ static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_stru
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
......
......@@ -52,7 +52,7 @@ static void __iomem *scu_base_addr(void)
return NULL;
}
static DEFINE_SPINLOCK(boot_lock);
static DEFINE_RAW_SPINLOCK(boot_lock);
static void __cpuinit ux500_secondary_init(unsigned int cpu)
{
......@@ -65,8 +65,8 @@ static void __cpuinit ux500_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
raw_spin_lock(&boot_lock);
raw_spin_unlock(&boot_lock);
}
static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
......@@ -77,7 +77,7 @@ static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *
* set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
......@@ -98,7 +98,7 @@ static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
......
......@@ -431,6 +431,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
if (interrupts_enabled(regs))