#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/uaccess.h>
#include <linux/seq_file.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+#include <linux/uaccess.h>
+#else
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <asm/uaccess.h>
+#define __user
+#define unlocked_ioctl ioctl
+#endif
+
#define WARM_CODE
#include "../warm.h"
#include "warm_ops.h"
#error need proc_fs
#endif
-#define WARM_VER "r1"
+#define WARM_VER "r3"
#define PFX "wARM: "
+#define WARM_INFO(fmt, ...) \
+ if (verbose) \
+ pr_info(PFX fmt, ##__VA_ARGS__)
+
+#define SECTION_SIZE 0x100000
#define MAX_CACHEOP_RANGE 16384
/* assume RAM starts at phys addr 0 (this is really machine specific) */
#define RAM_PHYS_START 0
#define RAM_MAX_SIZE 0x10000000 /* 256M, try to be future proof */
+/* expected CPU id */
+#if defined(CONFIG_CPU_ARM926T)
+#define EXPECTED_ID 0x069260
+#elif defined(CONFIG_CPU_ARM920T)
+#define EXPECTED_ID 0x029200
+#else
+#error "unsupported CPU"
+#endif
+
extern unsigned long max_mapnr;
/* "upper" physical memory, not seen by Linux and to be mmap'ed */
static u32 uppermem_start;
static u32 uppermem_end;
+static spinlock_t lock;
+
+static int verbose;
static u32 *get_pgtable(void)
{
static int do_set_cb_uppermem(int in_cb, int is_set)
{
+ unsigned long flags;
u32 *pgtable, *cpt;
int i, j, count = 0;
int bits = 0;
if (in_cb & WCB_B_BIT)
bits |= 4;
+ spin_lock_irqsave(&lock, flags);
+
pgtable = get_pgtable();
for (i = 0; i < 4096; i++)
{
- if (!(pgtable[i] & 1))
- /* must be course of fine page table */
+ if ((pgtable[i] & 3) != 1)
+ /* must be coarse page table */
continue;
cpt = __va(pgtable[i] & 0xfffffc00);
warm_cop_clean_d();
warm_drain_wb_inval_tlb();
- pr_info(PFX "%c%c bit(s) %s for phys %08x-%08x (%d pages)\n",
+ spin_unlock_irqrestore(&lock, flags);
+
+ WARM_INFO("%c%c bit(s) %s for phys %08x-%08x (%d pages)\n",
bits & 8 ? 'c' : ' ', bits & 4 ? 'b' : ' ',
is_set ? "set" : "cleared",
uppermem_start, uppermem_end - 1, count);
static int do_set_cb_virt(int in_cb, int is_set, u32 addr, u32 size)
{
int count = 0, bits = 0;
- u32 desc1, desc2;
- u32 *pgtable, *cpt;
+ unsigned long flags;
+ u32 desc1, desc2 = 0;
+ u32 *pgtable, *cpt = NULL;
u32 start, end;
+ u32 mask;
if (in_cb & WCB_C_BIT)
bits |= 8;
if (in_cb & WCB_B_BIT)
bits |= 4;
- size += addr & ~(PAGE_SIZE - 1);
- size = ALIGN(size, PAGE_SIZE);
+ mask = PAGE_SIZE - 1;
+ size += addr & mask;
+ size = (size + mask) & ~mask;
addr &= ~(PAGE_SIZE - 1);
start = addr;
end = addr + size;
+ spin_lock_irqsave(&lock, flags);
+
pgtable = get_pgtable();
- for (; addr < end; addr += PAGE_SIZE)
+ while (addr < end)
{
desc1 = pgtable[addr >> 20];
- if (!(desc1 & 3))
+ switch (desc1 & 3) {
+ case 0:
+ spin_unlock_irqrestore(&lock, flags);
+ printk(KERN_WARNING PFX "address %08x not mapped.\n", addr);
return -EINVAL;
+ case 1:
+ /* coarse table */
+ cpt = __va(desc1 & 0xfffffc00);
+ desc2 = cpt[(addr >> 12) & 0xff];
+ break;
+ case 2:
+ /* section */
+ if (is_set)
+ desc1 |= bits;
+ else
+ desc1 &= ~bits;
+ pgtable[addr >> 20] = desc1;
+ addr += SECTION_SIZE;
+ count++;
+ continue;
+ case 3:
+ cpt = __va(desc1 & 0xfffff000);
+ desc2 = cpt[(addr >> 10) & 0x3ff];
+ break;
+ }
- cpt = __va(desc1 & 0xfffffc00);
- desc2 = cpt[(addr >> 12) & 0xff];
-
- if ((desc2 & 3) != 2) {
- printk(KERN_WARNING PFX "not small page? %08x %08x\n", desc2, addr);
+ if ((desc2 & 3) == 0) {
+ spin_unlock_irqrestore(&lock, flags);
+ printk(KERN_WARNING PFX "address %08x not mapped (%08x)\n",
+ addr, desc2);
return -EINVAL;
}
desc2 |= bits;
else
desc2 &= ~bits;
- desc2 |= 0xff0;
- cpt[(addr >> 12) & 0xff] = desc2;
+ /* this might be a bad idea, better let it fault so that Linux does
+ * its accounting, but that will drop CB bits, so keep this
+ * for compatibility */
+ if ((desc2 & 3) == 2)
+ desc2 |= 0xff0;
+
+ switch (desc1 & 3) {
+ case 1:
+ cpt[(addr >> 12) & 0xff] = desc2;
+ break;
+ case 3:
+ cpt[(addr >> 10) & 0x3ff] = desc2;
+ break;
+ }
+
+ addr += PAGE_SIZE;
count++;
}
warm_cop_clean_d();
warm_drain_wb_inval_tlb();
- pr_info(PFX "%c%c bit(s) %s virt %08x-%08x (%d pages)\n",
+ spin_unlock_irqrestore(&lock, flags);
+
+ WARM_INFO("%c%c bit(s) %s virt %08x-%08x (%d pages)\n",
bits & 8 ? 'c' : ' ', bits & 4 ? 'b' : ' ',
is_set ? "set" : "cleared", start, end - 1, count);
pgtable = get_pgtable();
desc1 = pgtable[addr >> 20];
- if (!(desc1 & 3))
- return -EINVAL;
-
- if ((desc1 & 3) == 2) {
- /* 1MB section */
+ switch (desc1 & 3) {
+ case 1: /* coarse table */
+ cpt = __va(desc1 & 0xfffffc00);
+ desc2 = cpt[(addr >> 12) & 0xff];
+ break;
+ case 2: /* 1MB section */
*_addr = (desc1 & 0xfff00000) | (addr & 0xfffff);
return 0;
+ case 3: /* fine table */
+ cpt = __va(desc1 & 0xfffff000);
+ desc2 = cpt[(addr >> 10) & 0x3ff];
+ break;
+ default:
+ return -EINVAL;
}
-
- cpt = __va(desc1 & 0xfffffc00);
- desc2 = cpt[(addr >> 12) & 0xff];
-
- if ((desc2 & 3) != 2) {
- printk(KERN_WARNING PFX "not small page? %08x %08x\n", desc2, addr);
+
+ switch (desc2 & 3) {
+ case 1: /* large page */
+ *_addr = (desc2 & ~0xffff) | (addr & 0xffff);
+ break;
+ case 2: /* small page */
+ *_addr = (desc2 & ~0x0fff) | (addr & 0x0fff);
+ break;
+ case 3: /* tiny page */
+ *_addr = (desc2 & ~0x03ff) | (addr & 0x03ff);
+ break;
+ default:
return -EINVAL;
}
- *_addr = (desc2 & 0xfffffc00) | (addr & 0x3ff);
return 0;
}
return 0;
}
+static int do_map_op(u32 vaddr, u32 paddr, u32 size, int cb, int is_unmap) /* map/unmap 1MB first-level sections; cb = WCB_* cache bits; 0 or -EINVAL */
+{
+ int count = 0, retval = 0;
+ unsigned long flags;
+ u32 pstart, start, end;
+ u32 desc1, apcb_bits;
+ u32 *pgtable;
+ u32 v, mask;
+
+ apcb_bits = (3 << 10) | (1 << 5); /* r/w, dom 1 */
+ if (cb & WCB_C_BIT)
+ apcb_bits |= 8; /* C (cacheable) bit of the section descriptor */
+ if (cb & WCB_B_BIT)
+ apcb_bits |= 4; /* B (bufferable) bit */
+
+ mask = SECTION_SIZE - 1;
+ size = (size + mask) & ~mask; /* round size up to whole 1MB sections */
+
+ pstart = paddr;
+ start = vaddr;
+ end = start + size;
+
+ /* check for overflows */
+ if (end - 1 < start) /* virtual range wraps around 0xffffffff */
+ return -EINVAL;
+ if (pstart + size - 1 < pstart) /* physical range wraps */
+ return -EINVAL;
+
+ spin_lock_irqsave(&lock, flags);
+
+ pgtable = get_pgtable();
+
+ for (; vaddr < end; vaddr += SECTION_SIZE, paddr += SECTION_SIZE)
+ {
+ desc1 = pgtable[vaddr >> 20]; /* first-level descriptor for this 1MB slot */
+
+ if (is_unmap) {
+ if ((desc1 & 3) != 2) { /* refuse to unmap anything but a section entry */
+ spin_unlock_irqrestore(&lock, flags);
+ printk(KERN_WARNING PFX "vaddr %08x is not a section? (%08x)\n",
+ vaddr, desc1);
+ return -EINVAL;
+ }
+ v = 0; /* fault entry */
+ } else {
+ if ((desc1 & 3) != 0) { /* slot already in use: stop and undo below */
+ printk(KERN_WARNING PFX "vaddr %08x already mapped? (%08x)\n",
+ vaddr, desc1);
+ retval = -EINVAL;
+ break;
+ }
+ v = (paddr & ~mask) | apcb_bits | 0x12; /* 0x12: section type, ARMv4/v5 bit 4 set */
+ }
+
+ pgtable[vaddr >> 20] = v;
+ count++;
+ }
+
+ if (retval != 0) {
+ /* undo mappings */
+ vaddr = start;
+
+ for (; vaddr < end && count > 0; vaddr += SECTION_SIZE, count--)
+ pgtable[vaddr >> 20] = 0;
+ }
+
+ warm_cop_clean_d();
+ warm_drain_wb_inval_tlb(); /* flush so old translations can't linger in the TLB */
+
+ spin_unlock_irqrestore(&lock, flags);
+
+ if (retval == 0 && !is_unmap) {
+ WARM_INFO("mapped %08x to %08x with %c%c bit(s) (%d section(s))\n",
+ start, pstart, apcb_bits & 8 ? 'c' : ' ',
+ apcb_bits & 4 ? 'b' : ' ', count);
+ }
+
+ return retval;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
static long warm_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
+#else
+static int warm_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long __arg)
+#endif
{
void __user *arg = (void __user *) __arg;
union {
struct warm_cache_op wcop;
struct warm_change_cb ccb;
+ struct warm_map_op mop;
unsigned long addr;
} u;
long ret;
return -EFAULT;
if (u.wcop.ops & ~(WOP_D_CLEAN|WOP_D_INVALIDATE|WOP_I_INVALIDATE))
return -EINVAL;
- if (u.wcop.size > MAX_CACHEOP_RANGE)
+ if (u.wcop.size == (unsigned long)-1 ||
+ (u.wcop.size > MAX_CACHEOP_RANGE && !(u.wcop.ops & WOP_D_INVALIDATE)))
ret = do_cache_ops_whole(u.wcop.ops);
else
ret = do_cache_ops(u.wcop.ops, u.wcop.addr, u.wcop.size);
if (copy_to_user(arg, &u.addr, sizeof(u.addr)))
return -EFAULT;
break;
+ case WARMC_MMAP:
+ if (copy_from_user(&u.mop, arg, sizeof(u.mop)))
+ return -EFAULT;
+ if (u.mop.cb & ~(WCB_C_BIT|WCB_B_BIT))
+ return -EINVAL;
+ ret = do_map_op(u.mop.virt_addr, u.mop.phys_addr, u.mop.size,
+ u.mop.cb, u.mop.is_unmap);
+ break;
default:
ret = -ENOTTY;
break;
static int __init warm_module_init(void)
{
struct proc_dir_entry *pret;
+ u32 cpuid;
+
+ asm ("mrc p15, 0, %0, c0, c0, 0" : "=r"(cpuid));
+ if ((cpuid & 0x0ffff0) != EXPECTED_ID) {
+ printk(KERN_ERR PFX "module was compiled for different CPU, aborting\n");
+ return -1;
+ }
pret = create_proc_entry("warm", S_IWUGO | S_IRUGO, NULL);
if (!pret) {
pret->owner = THIS_MODULE;
pret->proc_fops = &warm_fops;
+ spin_lock_init(&lock);
+
uppermem_start = RAM_PHYS_START + (max_mapnr << PAGE_SHIFT);
uppermem_end = RAM_PHYS_START + RAM_MAX_SIZE;
module_init(warm_module_init);
module_exit(warm_module_exit);
+module_param(verbose, int, 0644);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM processor services");
MODULE_AUTHOR("Grazvydas Ignotas");