From: Vineet Gupta
Date: Mon, 4 Aug 2014 15:32:31 +0000 (-0700)
Subject: ARC: untangle cache flush loop
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=11e14896ea3b0acbdcadb171bc40fc8fef37370e;p=openwrt%2Fstaging%2Fblogic.git

ARC: untangle cache flush loop

- Remove the ifdef'ery and write distinct versions for each mmu ver even
  if there is some code duplication

Signed-off-by: Vineet Gupta
---

diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 82d24c337a0b..6fa5f0f7f549 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -140,25 +140,19 @@ dc_chk:
  * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
  */
 
-static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
-                                     unsigned long sz, const int op)
+static inline
+void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
+                          unsigned long sz, const int op)
 {
-        unsigned int aux_cmd, aux_tag;
+        unsigned int aux_cmd;
         int num_lines;
-        const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+        const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
 
         if (op == OP_INV_IC) {
                 aux_cmd = ARC_REG_IC_IVIL;
-#if (CONFIG_ARC_MMU_VER > 2)
-                aux_tag = ARC_REG_IC_PTAG;
-#endif
-        }
-        else {
+        } else {
                 /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
                 aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
-#if (CONFIG_ARC_MMU_VER > 2)
-                aux_tag = ARC_REG_DC_PTAG;
-#endif
         }
 
         /* Ensure we properly floor/ceil the non-line aligned/sized requests
@@ -167,7 +161,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
          *  -@paddr will be cache-line aligned already (being page aligned)
          *  -@sz will be integral multiple of line size (being page sized).
          */
-        if (!full_page_op) {
+        if (!full_page) {
                 sz += paddr & ~CACHE_LINE_MASK;
                 paddr &= CACHE_LINE_MASK;
                 vaddr &= CACHE_LINE_MASK;
@@ -175,32 +169,68 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
 
         num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
 
-#if (CONFIG_ARC_MMU_VER <= 2)
         /* MMUv2 and before: paddr contains stuffed vaddrs bits */
         paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#else
-        /* if V-P const for loop, PTAG can be written once outside loop */
-        if (full_page_op)
+
+        while (num_lines-- > 0) {
+                write_aux_reg(aux_cmd, paddr);
+                paddr += L1_CACHE_BYTES;
+        }
+}
+
+static inline
+void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
+                          unsigned long sz, const int op)
+{
+        unsigned int aux_cmd, aux_tag;
+        int num_lines;
+        const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+        if (op == OP_INV_IC) {
+                aux_cmd = ARC_REG_IC_IVIL;
+                aux_tag = ARC_REG_IC_PTAG;
+        } else {
+                aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+                aux_tag = ARC_REG_DC_PTAG;
+        }
+
+        /* Ensure we properly floor/ceil the non-line aligned/sized requests
+         * and have @paddr - aligned to cache line and integral @num_lines.
+         * This however can be avoided for page sized since:
+         *  -@paddr will be cache-line aligned already (being page aligned)
+         *  -@sz will be integral multiple of line size (being page sized).
+         */
+        if (!full_page) {
+                sz += paddr & ~CACHE_LINE_MASK;
+                paddr &= CACHE_LINE_MASK;
+                vaddr &= CACHE_LINE_MASK;
+        }
+        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+        /*
+         * MMUv3, cache ops require paddr in PTAG reg
+         * if V-P const for loop, PTAG can be written once outside loop
+         */
+        if (full_page)
                 write_aux_reg(aux_tag, paddr);
-#endif
 
         while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-                /* MMUv3, cache ops require paddr seperately */
-                if (!full_page_op) {
+                if (!full_page) {
                         write_aux_reg(aux_tag, paddr);
                         paddr += L1_CACHE_BYTES;
                 }
 
                 write_aux_reg(aux_cmd, vaddr);
                 vaddr += L1_CACHE_BYTES;
-#else
-                write_aux_reg(aux_cmd, paddr);
-                paddr += L1_CACHE_BYTES;
-#endif
         }
 }
 
+#if (CONFIG_ARC_MMU_VER < 3)
+#define __cache_line_loop       __cache_line_loop_v2
+#elif (CONFIG_ARC_MMU_VER == 3)
+#define __cache_line_loop       __cache_line_loop_v3
+#endif
+
 #ifdef CONFIG_ARC_HAS_DCACHE
 
 /***************************************************************
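Note (not part of the patch): below is a minimal, self-contained C sketch of the pattern the patch adopts, i.e. one helper per MMU generation plus a compile-time alias at a single call site, instead of #ifdef blocks interleaved inside one loop. MOCK_MMU_VER, mock_line_op() and the register IDs are illustrative stand-ins, not the real ARC aux registers or kernel APIs.

#include <stdio.h>

#define MOCK_MMU_VER    3
#define LINE_BYTES      64

/* stand-in for the hardware aux-register write */
static void mock_line_op(int reg, unsigned long val)
{
        printf("write reg %d <- %#lx\n", reg, val);
}

/* "v2" flavour: one address register programmed per cache line */
static inline void line_loop_v2(unsigned long paddr, unsigned long sz)
{
        int lines = (sz + LINE_BYTES - 1) / LINE_BYTES;

        while (lines-- > 0) {
                mock_line_op(1, paddr);
                paddr += LINE_BYTES;
        }
}

/* "v3" flavour: tag (physical) and index (virtual) programmed per line */
static inline void line_loop_v3(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
{
        int lines = (sz + LINE_BYTES - 1) / LINE_BYTES;

        while (lines-- > 0) {
                mock_line_op(2, paddr); /* tag register */
                mock_line_op(1, vaddr); /* index register */
                paddr += LINE_BYTES;
                vaddr += LINE_BYTES;
        }
}

/* single call site; the flavour is chosen once, at compile time */
#if (MOCK_MMU_VER < 3)
#define line_loop(p, v, s)      line_loop_v2(p, s)
#else
#define line_loop(p, v, s)      line_loop_v3(p, v, s)
#endif

int main(void)
{
        line_loop(0x8000UL, 0x1000UL, 128);
        return 0;
}

The duplication between the two flavours mirrors the patch's stated trade-off: each loop body is straight-line and readable, and the per-version choice is made once at the #define rather than on every line of the hot loop.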