📄 1026.gcc41x.patch
+
+src_unaligned_dst_aligned:
+        SRL     t0, len, LOG_NBYTES+2   # +2 for 4 units/iter
+        PREF(   0, 3*32(src) )
+        beqz    t0, cleanup_src_unaligned
+         and    rem, len, (4*NBYTES-1)  # rem = len % 4*NBYTES
+        PREF(   1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC(    LDFIRST t0, FIRST(0)(src),      l_exc)
+EXC(    LDFIRST t1, FIRST(1)(src),      l_exc_copy)
+        SUB     len, len, 4*NBYTES
+EXC(    LDREST  t0, REST(0)(src),       l_exc_copy)
+EXC(    LDREST  t1, REST(1)(src),       l_exc_copy)
+EXC(    LDFIRST t2, FIRST(2)(src),      l_exc_copy)
+EXC(    LDFIRST t3, FIRST(3)(src),      l_exc_copy)
+EXC(    LDREST  t2, REST(2)(src),       l_exc_copy)
+EXC(    LDREST  t3, REST(3)(src),       l_exc_copy)
+        PREF(   0, 9*32(src) )          # 0 is PREF_LOAD  (not streamed)
+        ADD     src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+        nop                             # improves slotting
+#endif
+        STORE   t0, UNIT(0)(dst)
+        STORE   t1, UNIT(1)(dst)
+        STORE   t2, UNIT(2)(dst)
+        STORE   t3, UNIT(3)(dst)
+        PREF(   1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
+        bne     len, rem, 1b
+         ADD    dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+        beqz    len, done
+         and    rem, len, NBYTES-1      # rem = len % NBYTES
+        beq     rem, len, copy_bytes
+         nop
+1:
+EXC(    LDFIRST t0, FIRST(0)(src),      l_exc)
+EXC(    LDREST  t0, REST(0)(src),       l_exc_copy)
+        ADD     src, src, NBYTES
+        SUB     len, len, NBYTES
+        STORE   t0, 0(dst)
+        bne     len, rem, 1b
+         ADD    dst, dst, NBYTES
+
+copy_bytes_checklen:
+        beqz    len, done
+         nop
+copy_bytes:
+        /* 0 < len < NBYTES */
+#define COPY_BYTE(N)                    \
+EXC(    lb      t0, N(src), l_exc);     \
+        SUB     len, len, 1;            \
+        beqz    len, done;              \
+         sb     t0, N(dst)
+
+        COPY_BYTE(0)
+        COPY_BYTE(1)
+#ifdef USE_DOUBLE
+        COPY_BYTE(2)
+        COPY_BYTE(3)
+        COPY_BYTE(4)
+        COPY_BYTE(5)
+#endif
+EXC(    lb      t0, NBYTES-2(src), l_exc)
+        SUB     len, len, 1
+        jr      ra
+         sb     t0, NBYTES-2(dst)
+done:
+        jr      ra
+         nop
+        END(__copy_user_inatomic)
+
+l_exc_copy:
+        /*
+         * Copy bytes from src until faulting load address (or until a
+         * lb faults)
+         *
+         * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+         * may be more than a byte beyond the last address.
+         * Hence, the lb below may get an exception.
+         *
+         * Assumes src < THREAD_BUADDR($28)
+         */
+        LOAD    t0, TI_TASK($28)
+         nop
+        LOAD    t0, THREAD_BUADDR(t0)
+1:
+EXC(    lb      t1, 0(src), l_exc)
+        ADD     src, src, 1
+        sb      t1, 0(dst)      # can't fault -- we're copy_from_user
+        bne     src, t0, 1b
+         ADD    dst, dst, 1
+l_exc:
+        LOAD    t0, TI_TASK($28)
+         nop
+        LOAD    t0, THREAD_BUADDR(t0)   # t0 is just past last good address
+         nop
+        SUB     len, AT, t0             # len number of uncopied bytes
+        jr      ra
+         nop
diff -Naur --exclude=CVS --exclude='*.o' --exclude='*.a' --exclude='*.so' --exclude='*.elf' --exclude=System.map --exclude=Makefile.d --exclude='*log' --exclude='*log2' --exclude='*~' --exclude='.*~' --exclude='.#*' --exclude='*.bak' --exclude='*.orig' --exclude='*.rej' --exclude='core.[0-9]*' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=vmlinux --exclude=vmlinux.bin --exclude=yamon-02.06-SIGMADESIGNS-01_el.bin linux-2.6.15.ref/include/asm-mips/uaccess.h linux-2.6.15/include/asm-mips/uaccess.h
--- linux-2.6.15.ref/include/asm-mips/uaccess.h 2006-01-25 20:51:58.000000000 -0800
+++ linux-2.6.15/include/asm-mips/uaccess.h     2007-05-11 15:46:34.000000000 -0700
@@ -9,7 +9,6 @@
 #ifndef _ASM_UACCESS_H
 #define _ASM_UACCESS_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
@@ -202,49 +201,49 @@
  * Yuck. We need two variants, one for 64bit operation and one
  * for 32 bit mode and old iron.
  */
-#ifdef __mips64
-#define __GET_USER_DW(ptr) __get_user_asm("ld", ptr)
-#else
-#define __GET_USER_DW(ptr) __get_user_asm_ll32(ptr)
+#ifdef CONFIG_32BIT
+#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
+#endif
+#ifdef CONFIG_64BIT
+#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
 #endif
 
-#define __get_user_nocheck(x,ptr,size) \
-({ \
-        __typeof(*(ptr)) __gu_val = (__typeof(*(ptr))) 0; \
-        long __gu_err = 0; \
-        \
+extern void __get_user_unknown(void);
+
+#define __get_user_common(val, size, ptr)                       \
+do {                                                            \
         switch (size) {                                         \
-        case 1: __get_user_asm("lb", ptr); break; \
-        case 2: __get_user_asm("lh", ptr); break; \
-        case 4: __get_user_asm("lw", ptr); break; \
-        case 8: __GET_USER_DW(ptr); break; \
+        case 1: __get_user_asm(val, "lb", ptr); break;          \
+        case 2: __get_user_asm(val, "lh", ptr); break;          \
+        case 4: __get_user_asm(val, "lw", ptr); break;          \
+        case 8: __GET_USER_DW(val, ptr); break;                 \
         default: __get_user_unknown(); break;                   \
         }                                                       \
-        (x) = (__typeof__(*(ptr))) __gu_val; \
+} while (0)
+
+#define __get_user_nocheck(x,ptr,size)                          \
+({                                                              \
+        long __gu_err;                                          \
+                                                                \
+        __get_user_common((x), size, ptr);                      \
         __gu_err;                                               \
 })
 
 #define __get_user_check(x,ptr,size)                            \
 ({                                                              \
-        const __typeof__(*(ptr)) __user * __gu_addr = (ptr); \
-        __typeof__(*(ptr)) __gu_val = 0; \
         long __gu_err = -EFAULT;                                \
+        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);     \
+                                                                \
+        if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))     \
+                __get_user_common((x), size, __gu_ptr);         \
                                                                 \
-        if (likely(access_ok(VERIFY_READ, __gu_addr, size))) { \
-                switch (size) { \
-                case 1: __get_user_asm("lb", __gu_addr); break; \
-                case 2: __get_user_asm("lh", __gu_addr); break; \
-                case 4: __get_user_asm("lw", __gu_addr); break; \
-                case 8: __GET_USER_DW(__gu_addr); break; \
-                default: __get_user_unknown(); break; \
-                } \
-        } \
-        (x) = (__typeof__(*(ptr))) __gu_val; \
         __gu_err;                                               \
 })
 
-#define __get_user_asm(insn, addr) \
+#define __get_user_asm(val, insn, addr)                         \
 {                                                               \
+        long __gu_tmp;                                          \
+                                                                \
         __asm__ __volatile__(                                   \
         "1:     " insn "        %1, %3                  \n"     \
         "2:                                             \n"     \
@@ -255,19 +254,25 @@
         "       .section __ex_table,\"a\"               \n"     \
         "       "__UA_ADDR "\t1b, 3b                    \n"     \
         "       .previous                               \n"     \
-        : "=r" (__gu_err), "=r" (__gu_val) \
+        : "=r" (__gu_err), "=r" (__gu_tmp)                      \
         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));             \
+                                                                \
+        (val) = (__typeof__(*(addr))) __gu_tmp;                 \
 }
 
 /*
  * Get a long long 64 using 32 bit registers.
  */
-#define __get_user_asm_ll32(addr) \
+#define __get_user_asm_ll32(val, addr)                          \
 {                                                               \
+        union {                                                 \
+                unsigned long long      l;                      \
+                __typeof__(*(addr))     t;                      \
+        } __gu_tmp;                                             \
+                                                                \
         __asm__ __volatile__(                                   \
         "1:     lw      %1, (%3)                        \n"     \
         "2:     lw      %D1, 4(%3)                      \n"     \
-        "       move    %0, $0                          \n"     \
         "3:     .section        .fixup,\"ax\"           \n"     \
         "4:     li      %0, %4                          \n"     \
         "       move    %1, $0                          \n"     \
@@ -278,21 +283,22 @@
         "       " __UA_ADDR "   1b, 4b                  \n"     \
         "       " __UA_ADDR "   2b, 4b                  \n"     \
         "       .previous                               \n"     \
-        : "=r" (__gu_err), "=&r" (__gu_val) \
+        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                   \
        : "0" (0), "r" (addr), "i" (-EFAULT));                  \
+                                                                \
+        (val) = __gu_tmp.t;                                     \
 }
 
-extern void __get_user_unknown(void);
-
 /*
  * Yuck. We need two variants, one for 64bit operation and one
  * for 32 bit mode and old iron.
  */
-#ifdef __mips64
-#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
-#else
+#ifdef CONFIG_32BIT
 #define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
 #endif
+#ifdef CONFIG_64BIT
+#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
+#endif
 
 #define __put_user_nocheck(x,ptr,size)                          \
 ({                                                              \
@@ -429,8 +435,34 @@
         __cu_len;                                               \
 })
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+
+#define __copy_to_user_inatomic(to,from,n)                      \
+({                                                              \
+        void __user *__cu_to;                                   \
+        const void *__cu_from;                                  \
+        long __cu_len;                                          \
+                                                                \
+        __cu_to = (to);                                         \
+        __cu_from = (from);                                     \
+        __cu_len = (n);                                         \
+        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+        __cu_len;                                               \
+})
+
+#define __copy_from_user_inatomic(to,from,n)                    \
+({                                                              \
+        void *__cu_to;                                          \
+        const void __user *__cu_from;                           \
+        long __cu_len;                                          \
+                                                                \
+        __cu_to = (to);                                         \
+        __cu_from = (from);                                     \
+        __cu_len = (n);                                         \
+        __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
+                                                    __cu_len);  \
+        __cu_len;                                               \
+})
 
 /*
  * copy_to_user: - Copy a block of data into user space.
@@ -484,8 +516,32 @@
         __cu_len_r;                                             \
 })
 
+#define __invoke_copy_from_user_inatomic(to,from,n)             \
+({                                                              \
+        register void *__cu_to_r __asm__ ("$4");                \
+        register const void __user *__cu_from_r __asm__ ("$5"); \
+        register long __cu_len_r __asm__ ("$6");                \
+                                                                \
+        __cu_to_r = (to);                                       \
+        __cu_from_r = (from);                                   \
+        __cu_len_r = (n);                                       \
+        __asm__ __volatile__(                                   \
+        ".set\tnoreorder\n\t"                                   \
+        __MODULE_JAL(__copy_user_inatomic)                      \
+        ".set\tnoat\n\t"                                        \
+        __UA_ADDU "\t$1, %1, %2\n\t"                            \
+        ".set\tat\n\t"                                          \
+        ".set\treorder"                                         \
+        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+        :                                                       \
+        : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
+          "memory");                                            \
+        __cu_len_r;                                             \
+})
+
 /*
- * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space.
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to: Destination address, in kernel space.
  * @from: Source address, in user space.
  * @n: Number of bytes to copy.
  *
diff -Naur --exclude=CVS --exclude='*.o' --exclude='*.a' --exclude='*.so' --exclude='*.elf' --exclude=System.map --exclude=Makefile.d --exclude='*log' --exclude='*log2' --exclude='*~' --exclude='.*~' --exclude='.#*' --exclude='*.bak' --exclude='*.orig' --exclude='*.rej' --exclude='core.[0-9]*' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=.depend --exclude='.*.o.flags' --exclude='*.gz' --exclude=vmlinux --exclude=vmlinux.bin --exclude=yamon-02.06-SIGMADESIGNS-01_el.bin linux-2.6.15.ref/README.1026.gcc41x.patch linux-2.6.15/README.1026.gcc41x.patch
--- linux-2.6.15.ref/README.1026.gcc41x.patch   1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.15/README.1026.gcc41x.patch       2007-05-11 15:47:29.000000000 -0700
@@ -0,0 +1,18 @@
+Feature:
+--------
+To support GCC 4.1.x or above.
+
+Prerequisite patch numbers:
+---------------------------
+none
+
+Primary author:
+---------------
+External (YH Lin)
+
+Related to which chip version SMP86xx xx=?
+-----------------------------------------
+all
+
+(linux patches) which CONFIG_... are provided:
+----------------------------------------------
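
Note: the __get_user_asm_ll32() union
-------------------------------------
In the reworked __get_user_asm_ll32() above, the union gives the inline asm
an integer lvalue (__gu_tmp.l) for the %1/%D1 register pair, while the result
is handed back through __gu_tmp.t, whose type is __typeof__(*(addr)).
Reading the other union member reinterprets the loaded bits as the
destination's actual type instead of performing an arithmetic conversion
from an integer temporary. A user-space sketch of the idiom (GET_LL32 is a
hypothetical stand-in, and memcpy stands in for the two exception-protected
lw instructions):

#include <stdio.h>
#include <string.h>

#define GET_LL32(val, addr)                                     \
do {                                                            \
        union {                                                 \
                unsigned long long      l;                      \
                __typeof__(*(addr))     t;                      \
        } __gu_tmp;                                             \
        /* stands in for "lw %1, (%3); lw %D1, 4(%3)" */        \
        memcpy(&__gu_tmp.l, (addr), sizeof(__gu_tmp.l));        \
        (val) = __gu_tmp.t;     /* bits, not a conversion */    \
} while (0)

int main(void)
{
        double d = 3.5, out = 0.0;

        GET_LL32(out, &d);
        /* an arithmetic cast of the integer temporary would have
         * produced garbage here; the union preserves 3.5 */
        printf("%f\n", out);
        return 0;
}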
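
Note: the __invoke_copy_from_user_inatomic() calling convention
---------------------------------------------------------------
The macro pins its operands to $4-$6 (a0-a2), the registers the hand-written
__copy_user_inatomic expects, lists the temporaries the assembly routine may
use as clobbers, and loads $1 (AT) with src + len via __UA_ADDU around the
call; the l_exc path above uses exactly that value ("SUB len, AT, t0") to
compute the number of uncopied bytes. A hedged sketch of the same
register-pinning idiom (MIPS GCC only; my_asm_copy is a hypothetical routine
that takes its arguments in a0-a2 rather than following the normal C ABI):

static inline long call_pinned(void *to, const void *from, long n)
{
        register void *to_r         __asm__("$4") = to;   /* a0 */
        register const void *from_r __asm__("$5") = from; /* a1 */
        register long n_r           __asm__("$6") = n;    /* a2 */

        __asm__ __volatile__(
                "jal\tmy_asm_copy"
                : "+r" (to_r), "+r" (from_r), "+r" (n_r)
                :
                : "$31", "memory");     /* jal clobbers ra */

        return n_r;     /* the routine leaves its result in a2 */
}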
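
Note: how the _inatomic variants are meant to be called
-------------------------------------------------------
The l_exc fault path of __copy_user_inatomic above only computes the number
of uncopied bytes and returns; unlike plain copy_from_user(), it does not
zero the remainder of the kernel buffer, so a caller that cannot take a
sleeping page fault can attempt the copy and handle the fallback itself.
A hedged sketch of the intended call pattern, modeled on the mm/filemap.c
idiom of this kernel generation (function and variable names are
illustrative):

#include <linux/highmem.h>
#include <asm/uaccess.h>

static size_t copy_from_user_to_page(struct page *page, unsigned long offset,
                                     const char __user *buf, unsigned bytes)
{
        char *kaddr;
        size_t left;

        /* no sleeping allowed between kmap_atomic()/kunmap_atomic() */
        kaddr = kmap_atomic(page, KM_USER0);
        left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
        kunmap_atomic(kaddr, KM_USER0);

        if (left) {
                /* faulted: retry where sleeping is allowed */
                kaddr = kmap(page);
                left = __copy_from_user(kaddr + offset, buf, bytes);
                kunmap(page);
        }
        return bytes - left;    /* bytes actually copied */
}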