From: <ljs...@us...> - 2011-01-09 04:54:04
Revision: 699 http://cadcdev.svn.sourceforge.net/cadcdev/?rev=699&view=rev Author: ljsebald Date: 2011-01-09 04:53:55 +0000 (Sun, 09 Jan 2011) Log Message: ----------- Adding in patches for GCC 4.5.2 and Newlib 1.19.0. These are updated for all the new stuff in the thread code. Added Paths: ----------- kos/utils/dc-chain/patches/gcc-4.5.2-kos.diff kos/utils/dc-chain/patches/newlib-1.19.0-kos.diff Added: kos/utils/dc-chain/patches/gcc-4.5.2-kos.diff =================================================================== --- kos/utils/dc-chain/patches/gcc-4.5.2-kos.diff (rev 0) +++ kos/utils/dc-chain/patches/gcc-4.5.2-kos.diff 2011-01-09 04:53:55 UTC (rev 699) @@ -0,0 +1,1955 @@ +diff -ruN gcc-4.5.2-orig/gcc/config/sh/crt1.asm gcc-4.5.2/gcc/config/sh/crt1.asm +--- gcc-4.5.2-orig/gcc/config/sh/crt1.asm 2009-04-09 11:00:19.000000000 -0400 ++++ gcc-4.5.2/gcc/config/sh/crt1.asm 2010-08-12 09:41:54.000000000 -0400 +@@ -1,1369 +1,197 @@ +-/* Copyright (C) 2000, 2001, 2003, 2004, 2005, 2006, 2009 +- Free Software Foundation, Inc. +- This file was pretty much copied from newlib. ++! KallistiOS ##version## ++! ++! startup.s ++! (c)2000-2001 Dan Potter ++! ++! This file must appear FIRST in your linking order, or your program won't ++! work correctly as a raw binary. ++! ++! This is very loosely based on Marcus' crt0.s/startup.s ++! ++ ++.globl start ++.globl _start ++.globl _arch_real_exit ++.globl __arch_old_sr ++.globl __arch_old_vbr ++.globl __arch_old_stack ++.globl __arch_old_fpscr + +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify it +-under the terms of the GNU General Public License as published by the +-Free Software Foundation; either version 3, or (at your option) any +-later version. +- +-GCC is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-General Public License for more details. +- +-Under Section 7 of GPL version 3, you are granted additional +-permissions described in the GCC Runtime Library Exception, version +-3.1, as published by the Free Software Foundation. +- +-You should have received a copy of the GNU General Public License and +-a copy of the GCC Runtime Library Exception along with this program; +-see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +-<http://www.gnu.org/licenses/>. 
*/ +- +- +-#ifdef MMU_SUPPORT +- /* Section used for exception/timer interrupt stack area */ +- .section .data.vbr.stack,"aw" +- .align 4 +- .global __ST_VBR +-__ST_VBR: +- .zero 1024 * 2 /* ; 2k for VBR handlers */ +-/* Label at the highest stack address where the stack grows from */ +-__timer_stack: +-#endif /* MMU_SUPPORT */ +- +- /* ;---------------------------------------- +- Normal newlib crt1.asm */ +- +-#ifdef __SH5__ +- .section .data,"aw" +- .global ___data +-___data: +- +- .section .rodata,"a" +- .global ___rodata +-___rodata: +- +-#define ICCR_BASE 0x01600000 +-#define OCCR_BASE 0x01e00000 +-#define MMUIR_BASE 0x00000000 +-#define MMUDR_BASE 0x00800000 +- +-#define PTE_ENABLED 1 +-#define PTE_DISABLED 0 +- +-#define PTE_SHARED (1 << 1) +-#define PTE_NOT_SHARED 0 +- +-#define PTE_CB_UNCACHEABLE 0 +-#define PTE_CB_DEVICE 1 +-#define PTE_CB_CACHEABLE_WB 2 +-#define PTE_CB_CACHEABLE_WT 3 +- +-#define PTE_SZ_4KB (0 << 3) +-#define PTE_SZ_64KB (1 << 3) +-#define PTE_SZ_1MB (2 << 3) +-#define PTE_SZ_512MB (3 << 3) +- +-#define PTE_PRR (1 << 6) +-#define PTE_PRX (1 << 7) +-#define PTE_PRW (1 << 8) +-#define PTE_PRU (1 << 9) +- +-#define SR_MMU_BIT 31 +-#define SR_BL_BIT 28 +- +-#define ALIGN_4KB (0xfff) +-#define ALIGN_1MB (0xfffff) +-#define ALIGN_512MB (0x1fffffff) +- +-#define DYNACON_BASE 0x0f000000 +-#define DM_CB_DLINK_BASE 0x0c000000 +-#define DM_DB_DLINK_BASE 0x0b000000 +- +-#define FEMI_AREA_0 0x00000000 +-#define FEMI_AREA_1 0x04000000 +-#define FEMI_AREA_2 0x05000000 +-#define FEMI_AREA_3 0x06000000 +-#define FEMI_AREA_4 0x07000000 +-#define FEMI_CB 0x08000000 +- +-#define EMI_BASE 0X80000000 +- +-#define DMA_BASE 0X0e000000 +- +-#define CPU_BASE 0X0d000000 +- +-#define PERIPH_BASE 0X09000000 +-#define DMAC_BASE 0x0e000000 +-#define INTC_BASE 0x0a000000 +-#define CPRC_BASE 0x0a010000 +-#define TMU_BASE 0x0a020000 +-#define SCIF_BASE 0x0a030000 +-#define RTC_BASE 0x0a040000 +- +- +- +-#define LOAD_CONST32(val, reg) \ +- movi ((val) >> 16) & 65535, reg; \ +- shori (val) & 65535, reg +- +-#define LOAD_PTEH_VAL(sym, align, bits, scratch_reg, reg) \ +- LOAD_ADDR (sym, reg); \ +- LOAD_CONST32 ((align), scratch_reg); \ +- andc reg, scratch_reg, reg; \ +- LOAD_CONST32 ((bits), scratch_reg); \ +- or reg, scratch_reg, reg +- +-#define LOAD_PTEL_VAL(sym, align, bits, scratch_reg, reg) \ +- LOAD_ADDR (sym, reg); \ +- LOAD_CONST32 ((align), scratch_reg); \ +- andc reg, scratch_reg, reg; \ +- LOAD_CONST32 ((bits), scratch_reg); \ +- or reg, scratch_reg, reg +- +-#define SET_PTE(pte_addr_reg, pteh_val_reg, ptel_val_reg) \ +- putcfg pte_addr_reg, 0, r63; \ +- putcfg pte_addr_reg, 1, ptel_val_reg; \ +- putcfg pte_addr_reg, 0, pteh_val_reg +- +-#if __SH5__ == 64 +- .section .text,"ax" +-#define LOAD_ADDR(sym, reg) \ +- movi (sym >> 48) & 65535, reg; \ +- shori (sym >> 32) & 65535, reg; \ +- shori (sym >> 16) & 65535, reg; \ +- shori sym & 65535, reg +-#else +- .mode SHmedia +- .section .text..SHmedia32,"ax" +-#define LOAD_ADDR(sym, reg) \ +- movi (sym >> 16) & 65535, reg; \ +- shori sym & 65535, reg +-#endif +- .global start ++_start: + start: +- LOAD_ADDR (_stack, r15) +- +-#ifdef MMU_SUPPORT +- ! Set up the VM using the MMU and caches +- +- ! .vm_ep is first instruction to execute +- ! after VM initialization +- pt/l .vm_ep, tr1 +- +- ! Configure instruction cache (ICCR) +- movi 3, r2 +- movi 0, r3 +- LOAD_ADDR (ICCR_BASE, r1) +- putcfg r1, 0, r2 +- putcfg r1, 1, r3 +- +- ! movi 7, r2 ! write through +- ! 
Configure operand cache (OCCR) +- LOAD_ADDR (OCCR_BASE, r1) +- putcfg r1, 0, r2 +- putcfg r1, 1, r3 +- +- ! Disable all PTE translations +- LOAD_ADDR (MMUIR_BASE, r1) +- LOAD_ADDR (MMUDR_BASE, r2) +- movi 64, r3 +- pt/l .disable_ptes_loop, tr0 +-.disable_ptes_loop: +- putcfg r1, 0, r63 +- putcfg r2, 0, r63 +- addi r1, 16, r1 +- addi r2, 16, r2 +- addi r3, -1, r3 +- bgt r3, r63, tr0 +- +- LOAD_ADDR (MMUIR_BASE, r1) +- +- ! FEMI instruction mappings +- ! Area 0 - 1Mb cacheable at 0x00000000 +- ! Area 1 - None +- ! Area 2 - 1Mb cacheable at 0x05000000 +- ! - 1Mb cacheable at 0x05100000 +- ! Area 3 - None +- ! Area 4 - None +- +- ! Map a 1Mb page for instructions at 0x00000000 +- LOAD_PTEH_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRX | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1Mb page for instructions at 0x05000000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRX | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1Mb page for instructions at 0x05100000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRX | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 512M page for instructions at EMI base +- addi r1, 16, r1 +- LOAD_PTEH_VAL (EMI_BASE, ALIGN_512MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (EMI_BASE, ALIGN_512MB, PTE_CB_CACHEABLE_WB | PTE_SZ_512MB | PTE_PRX | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 4K page for instructions at DM_DB_DLINK_BASE +- addi r1, 16, r1 +- LOAD_PTEH_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_CB_CACHEABLE_WB | PTE_SZ_4KB | PTE_PRX | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- LOAD_ADDR (MMUDR_BASE, r1) +- +- ! FEMI data mappings +- ! Area 0 - 1Mb cacheable at 0x00000000 +- ! Area 1 - 1Mb device at 0x04000000 +- ! Area 2 - 1Mb cacheable at 0x05000000 +- ! - 1Mb cacheable at 0x05100000 +- ! Area 3 - None +- ! Area 4 - None +- ! CB - 1Mb device at 0x08000000 +- +- ! Map a 1Mb page for data at 0x00000000 +- LOAD_PTEH_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (FEMI_AREA_0, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1Mb page for data at 0x04000000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL (FEMI_AREA_1, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (FEMI_AREA_1, ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1Mb page for data at 0x05000000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (FEMI_AREA_2, ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1Mb page for data at 0x05100000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((FEMI_AREA_2+0x100000), ALIGN_1MB, PTE_CB_CACHEABLE_WB | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! 
Map a 4K page for registers at 0x08000000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL (FEMI_CB, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (FEMI_CB, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 512M page for data at EMI +- addi r1, 16, r1 +- LOAD_PTEH_VAL (EMI_BASE, ALIGN_512MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (EMI_BASE, ALIGN_512MB, PTE_CB_CACHEABLE_WB | PTE_SZ_512MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 4K page for DYNACON at DYNACON_BASE +- addi r1, 16, r1 +- LOAD_PTEH_VAL (DYNACON_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (DYNACON_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 4K page for instructions at DM_DB_DLINK_BASE +- addi r1, 16, r1 +- LOAD_PTEH_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (DM_DB_DLINK_BASE, ALIGN_4KB, PTE_CB_CACHEABLE_WB | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 4K page for data at DM_DB_DLINK_BASE+0x1000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((DM_DB_DLINK_BASE+0x1000), ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((DM_DB_DLINK_BASE+0x1000), ALIGN_4KB, PTE_CB_UNCACHEABLE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 4K page for stack DM_DB_DLINK_BASE+0x2000 +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((DM_DB_DLINK_BASE+0x2000), ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((DM_DB_DLINK_BASE+0x2000), ALIGN_4KB, PTE_CB_CACHEABLE_WB | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK +- ! 0x0c000000 - 0x0c0fffff +- addi r1, 16, r1 +- LOAD_PTEH_VAL (DM_CB_DLINK_BASE, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (DM_CB_DLINK_BASE, ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK +- ! 0x0c100000 - 0x0c1fffff +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x100000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK +- ! 0x0c200000 - 0x0c2fffff +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x200000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x200000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK +- ! 0x0c400000 - 0x0c4fffff +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x400000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x400000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map a 1M page for DM_CB_BASE2 at DM_CB_DLINK +- ! 0x0c800000 - 0x0c8fffff +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((DM_CB_DLINK_BASE+0x800000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((DM_CB_DLINK_BASE+0x800000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! 
Map a 4K page for DMA control registers +- addi r1, 16, r1 +- LOAD_PTEH_VAL (DMA_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (DMA_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map lots of 4K pages for peripherals +- +- ! /* peripheral */ +- addi r1, 16, r1 +- LOAD_PTEH_VAL (PERIPH_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (PERIPH_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- ! /* dmac */ +- addi r1, 16, r1 +- LOAD_PTEH_VAL (DMAC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (DMAC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- ! /* intc */ +- addi r1, 16, r1 +- LOAD_PTEH_VAL (INTC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (INTC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- ! /* rtc */ +- addi r1, 16, r1 +- LOAD_PTEH_VAL (RTC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (RTC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- ! /* dmac */ +- addi r1, 16, r1 +- LOAD_PTEH_VAL (TMU_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (TMU_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- ! /* scif */ +- addi r1, 16, r1 +- LOAD_PTEH_VAL (SCIF_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (SCIF_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- ! /* cprc */ +- addi r1, 16, r1 +- LOAD_PTEH_VAL (CPRC_BASE, ALIGN_4KB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (CPRC_BASE, ALIGN_4KB, PTE_CB_DEVICE | PTE_SZ_4KB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Map CPU WPC registers +- addi r1, 16, r1 +- LOAD_PTEH_VAL (CPU_BASE, ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL (CPU_BASE, ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- addi r1, 16, r1 +- +- LOAD_PTEH_VAL ((CPU_BASE+0x100000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((CPU_BASE+0x100000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((CPU_BASE+0x200000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((CPU_BASE+0x200000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- addi r1, 16, r1 +- LOAD_PTEH_VAL ((CPU_BASE+0x400000), ALIGN_1MB, PTE_ENABLED | PTE_NOT_SHARED, r25, r2) +- LOAD_PTEL_VAL ((CPU_BASE+0x400000), ALIGN_1MB, PTE_CB_DEVICE | PTE_SZ_1MB | PTE_PRR | PTE_PRW | PTE_PRU, r25, r3) +- SET_PTE (r1, r2, r3) +- +- ! Switch over to virtual addressing and enabled cache +- getcon sr, r1 +- movi 1, r2 +- shlli r2, SR_BL_BIT, r2 +- or r1, r2, r1 +- putcon r1, ssr +- getcon sr, r1 +- movi 1, r2 +- shlli r2, SR_MMU_BIT, r2 +- or r1, r2, r1 +- putcon r1, ssr +- gettr tr1, r1 +- putcon r1, spc +- synco +- rte +- +- ! VM entry point. From now on, we are in VM mode. +-.vm_ep: +- +- ! Install the trap handler, by seeding vbr with the +- ! correct value, and by assigning sr.bl = 0. 
+- +- LOAD_ADDR (vbr_start, r1) +- putcon r1, vbr +- movi ~(1<<28), r1 +- getcon sr, r2 +- and r1, r2, r2 +- putcon r2, sr +-#endif /* MMU_SUPPORT */ +- +- pt/l .Lzero_bss_loop, tr0 +- pt/l _init, tr5 +- pt/l ___setup_argv_and_call_main, tr6 +- pt/l _exit, tr7 +- +- ! zero out bss +- LOAD_ADDR (_edata, r0) +- LOAD_ADDR (_end, r1) +-.Lzero_bss_loop: +- stx.q r0, r63, r63 +- addi r0, 8, r0 +- bgt/l r1, r0, tr0 +- +- LOAD_ADDR (___data, r26) +- LOAD_ADDR (___rodata, r27) +- +-#ifdef __SH_FPU_ANY__ +- getcon sr, r0 +- ! enable the FP unit, by resetting SR.FD +- ! also zero out SR.FR, SR.SZ and SR.PR, as mandated by the ABI +- movi 0, r1 +- shori 0xf000, r1 +- andc r0, r1, r0 +- putcon r0, sr +-#if __SH5__ == 32 +- pt/l ___set_fpscr, tr0 +- movi 0, r4 +- blink tr0, r18 +-#endif +-#endif +- +- ! arrange for exit to call fini +- pt/l _atexit, tr1 +- LOAD_ADDR (_fini, r2) +- blink tr1, r18 +- +- ! call init +- blink tr5, r18 +- +- ! call the mainline +- blink tr6, r18 +- +- ! call exit +- blink tr7, r18 +- ! We should never return from _exit but in case we do we would enter the +- ! the following tight loop. This avoids executing any data that might follow. +-limbo: +- pt/l limbo, tr0 +- blink tr0, r63 +- +-#ifdef MMU_SUPPORT +- ! All these traps are handled in the same place. +- .balign 256 +-vbr_start: +- pt/l handler, tr0 ! tr0 trashed. +- blink tr0, r63 +- .balign 256 +-vbr_100: +- pt/l handler, tr0 ! tr0 trashed. +- blink tr0, r63 +-vbr_100_end: +- .balign 256 +-vbr_200: +- pt/l handler, tr0 ! tr0 trashed. +- blink tr0, r63 +- .balign 256 +-vbr_300: +- pt/l handler, tr0 ! tr0 trashed. +- blink tr0, r63 +- .balign 256 +-vbr_400: ! Should be at vbr+0x400 +-handler: +- /* If the trap handler is there call it */ +- LOAD_ADDR (__superh_trap_handler, r2) +- pta chandler,tr2 +- beq r2, r63, tr2 /* If zero, ie not present branch around to chandler */ +- /* Now call the trap handler with as much of the context unchanged as possible. 
+- Move trapping address into R18 to make it look like the trap point */ +- getcon spc, r18 +- pt/l __superh_trap_handler, tr0 +- blink tr0, r7 +-chandler: +- getcon spc, r62 +- getcon expevt, r2 +- pt/l _exit, tr0 +- blink tr0, r63 +- +- /* Simulated trap handler */ +- .section .text..SHmedia32,"ax" +-gcc2_compiled.: +- .section .debug_abbrev +-.Ldebug_abbrev0: +- .section .text..SHmedia32 +-.Ltext0: +- .section .debug_info +-.Ldebug_info0: +- .section .debug_line +-.Ldebug_line0: +- .section .text..SHmedia32,"ax" +- .align 5 +- .global __superh_trap_handler +- .type __superh_trap_handler,@function +-__superh_trap_handler: +-.LFB1: +- ptabs r18, tr0 +- addi.l r15, -8, r15 +- st.l r15, 4, r14 +- addi.l r15, -8, r15 +- add.l r15, r63, r14 +- st.l r14, 0, r2 +- ptabs r7, tr0 +- addi.l r14, 8, r14 +- add.l r14, r63, r15 +- ld.l r15, 4, r14 +- addi.l r15, 8, r15 +- blink tr0, r63 +-.LFE1: +-.Lfe1: +- .size __superh_trap_handler,.Lfe1-__superh_trap_handler +- +- .section .text..SHmedia32 +-.Letext0: +- +- .section .debug_info +- .ualong 0xa7 +- .uaword 0x2 +- .ualong .Ldebug_abbrev0 +- .byte 0x4 +- .byte 0x1 +- .ualong .Ldebug_line0 +- .ualong .Letext0 +- .ualong .Ltext0 +- .string "trap_handler.c" +- +- .string "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" +- +- .string "GNU C 2.97-sh5-010522" +- +- .byte 0x1 +- .byte 0x2 +- .ualong 0x9a +- .byte 0x1 +- .string "_superh_trap_handler" +- +- .byte 0x1 +- .byte 0x2 +- .byte 0x1 +- .ualong .LFB1 +- .ualong .LFE1 +- .byte 0x1 +- .byte 0x5e +- .byte 0x3 +- .string "trap_reason" +- +- .byte 0x1 +- .byte 0x1 +- .ualong 0x9a +- .byte 0x2 +- .byte 0x91 +- .byte 0x0 +- .byte 0x0 +- .byte 0x4 +- .string "unsigned int" +- +- .byte 0x4 +- .byte 0x7 +- .byte 0x0 +- +- .section .debug_abbrev +- .byte 0x1 +- .byte 0x11 +- .byte 0x1 +- .byte 0x10 +- .byte 0x6 +- .byte 0x12 +- .byte 0x1 +- .byte 0x11 +- .byte 0x1 +- .byte 0x3 +- .byte 0x8 +- .byte 0x1b +- .byte 0x8 +- .byte 0x25 +- .byte 0x8 +- .byte 0x13 +- .byte 0xb +- .byte 0,0 +- .byte 0x2 +- .byte 0x2e +- .byte 0x1 +- .byte 0x1 +- .byte 0x13 +- .byte 0x3f +- .byte 0xc +- .byte 0x3 +- .byte 0x8 +- .byte 0x3a +- .byte 0xb +- .byte 0x3b +- .byte 0xb +- .byte 0x27 +- .byte 0xc +- .byte 0x11 +- .byte 0x1 +- .byte 0x12 +- .byte 0x1 +- .byte 0x40 +- .byte 0xa +- .byte 0,0 +- .byte 0x3 +- .byte 0x5 +- .byte 0x0 +- .byte 0x3 +- .byte 0x8 +- .byte 0x3a +- .byte 0xb +- .byte 0x3b +- .byte 0xb +- .byte 0x49 +- .byte 0x13 +- .byte 0x2 +- .byte 0xa +- .byte 0,0 +- .byte 0x4 +- .byte 0x24 +- .byte 0x0 +- .byte 0x3 +- .byte 0x8 +- .byte 0xb +- .byte 0xb +- .byte 0x3e +- .byte 0xb +- .byte 0,0 +- .byte 0 +- +- .section .debug_pubnames +- .ualong 0x27 +- .uaword 0x2 +- .ualong .Ldebug_info0 +- .ualong 0xab +- .ualong 0x5b +- .string "_superh_trap_handler" +- +- .ualong 0x0 +- +- .section .debug_aranges +- .ualong 0x1c +- .uaword 0x2 +- .ualong .Ldebug_info0 +- .byte 0x4 +- .byte 0x0 +- .uaword 0x0,0 +- .ualong .Ltext0 +- .ualong .Letext0-.Ltext0 +- .ualong 0x0 +- .ualong 0x0 +- .ident "GCC: (GNU) 2.97-sh5-010522" +-#endif /* MMU_SUPPORT */ +-#else /* ! __SH5__ */ +- +- ! make a place to keep any previous value of the vbr register +- ! this will only have a value if it has been set by redboot (for example) +- .section .bss +-old_vbr: +- .long 0 +-#ifdef PROFILE +-profiling_enabled: +- .long 0 +-#endif +- +- +- .section .text +- .global start +- .import ___rtos_profiler_start_timer +- .weak ___rtos_profiler_start_timer +-start: +- mov.l stack_k,r15 +- +-#if defined (__SH3__) || (defined (__SH_FPU_ANY__) && ! 
defined (__SH2A__)) || defined (__SH4_NOFPU__) +-#define VBR_SETUP +- ! before zeroing the bss ... +- ! if the vbr is already set to vbr_start then the program has been restarted +- ! (i.e. it is not the first time the program has been run since reset) +- ! reset the vbr to its old value before old_vbr (in bss) is wiped +- ! this ensures that the later code does not create a circular vbr chain +- stc vbr, r1 +- mov.l vbr_start_k, r2 +- cmp/eq r1, r2 +- bf 0f +- ! reset the old vbr value +- mov.l old_vbr_k, r1 +- mov.l @r1, r2 +- ldc r2, vbr +-0: +-#endif /* VBR_SETUP */ +- +- ! zero out bss +- mov.l edata_k,r0 +- mov.l end_k,r1 +- mov #0,r2 +-start_l: +- mov.l r2,@r0 +- add #4,r0 +- cmp/ge r0,r1 +- bt start_l +- +-#if defined (__SH_FPU_ANY__) +- mov.l set_fpscr_k, r1 +- mov #4,r4 +- jsr @r1 +- shll16 r4 ! Set DN bit (flush denormal inputs to zero) +- lds r3,fpscr ! Switch to default precision +-#endif /* defined (__SH_FPU_ANY__) */ +- +-#ifdef VBR_SETUP +- ! save the existing contents of the vbr +- ! there will only be a prior value when using something like redboot +- ! otherwise it will be zero +- stc vbr, r1 +- mov.l old_vbr_k, r2 +- mov.l r1, @r2 +- ! setup vbr +- mov.l vbr_start_k, r1 +- ldc r1,vbr +-#endif /* VBR_SETUP */ +- +- ! if an rtos is exporting a timer start fn, +- ! then pick up an SR which does not enable ints +- ! (the rtos will take care of this) +- mov.l rtos_start_fn, r0 +- mov.l sr_initial_bare, r1 +- tst r0, r0 +- bt set_sr +- +- mov.l sr_initial_rtos, r1 +- +-set_sr: +- ! Set status register (sr) +- ldc r1, sr +- +- ! arrange for exit to call fini +- mov.l atexit_k,r0 +- mov.l fini_k,r4 +- jsr @r0 ++ ! Disable interrupts (if they're enabled) ++ mov.l old_sr_addr,r0 ++ stc sr,r1 ++ mov.l r1,@r0 ++ mov.l init_sr,r0 ++ ldc r0,sr ++ ++ ! Run in the P2 area ++ mov.l setup_cache_addr,r0 ++ mov.l p2_mask,r1 ++ or r1,r0 ++ jmp @r0 + nop + +-#ifdef PROFILE +- ! arrange for exit to call _mcleanup (via stop_profiling) +- mova stop_profiling,r0 +- mov.l atexit_k,r1 +- jsr @r1 +- mov r0, r4 +- +- ! Call profiler startup code +- mov.l monstartup_k, r0 +- mov.l start_k, r4 +- mov.l etext_k, r5 +- jsr @r0 ++setup_cache: ++ ! Now that we are in P2, it's safe to enable the cache ++ ! Check to see if we should enable OCRAM. ++ mov.l kos_init_flags_addr, r0 ++ add #2, r0 ++ mov.w @r0, r0 ++ tst #1, r0 ++ bf .L_setup_cache_L0 ++ mov.w ccr_data,r1 ++ bra .L_setup_cache_L1 ++ nop ++.L_setup_cache_L0: ++ mov.w ccr_data_ocram,r1 ++.L_setup_cache_L1: ++ mov.l ccr_addr,r0 ++ mov.l r1,@r0 ++ ++ ! After changing CCR, eight instructions must be executed before ++ ! it's safe to enter a cached area such as P1 ++ nop ! 1 ++ nop ! 2 ++ nop ! 3 ++ nop ! 4 ++ nop ! 5 (d-cache now safe) ++ nop ! 6 ++ mov.l init_addr,r0 ! 7 ++ mov #0,r1 ! 8 ++ jmp @r0 ! go ++ mov r1,r0 + nop + +- ! enable profiling trap +- ! until now any trap 33s will have been ignored +- ! This means that all library functions called before this point +- ! (directly or indirectly) may have the profiling trap at the start. +- ! Therefore, only mcount itself may not have the extra header. +- mov.l profiling_enabled_k2, r0 +- mov #1, r1 +- mov.l r1, @r0 +-#endif /* PROFILE */ ++init: ++ ! Save old PR on old stack so we can get to it later ++ sts.l pr,@-r15 + +- ! call init +- mov.l init_k,r0 ++ ! Save the current stack, and set a new stack (higher up in RAM) ++ mov.l old_stack_addr,r0 ++ mov.l r15,@r0 ++ mov.l new_stack,r15 ++ ++ ! Save VBR ++ mov.l old_vbr_addr,r0 ++ stc vbr,r1 ++ mov.l r1,@r0 ++ ++ ! 
Save FPSCR ++ mov.l old_fpscr_addr,r0 ++ sts fpscr,r1 ++ mov.l r1,@r0 ++ ++ ! Reset FPSCR ++ mov #4,r4 ! Use 00040000 (DN=1) ++ mov.l fpscr_addr,r0 + jsr @r0 +- nop ++ shll16 r4 + +- ! call the mainline +- mov.l main_k,r0 +- jsr @r0 +- nop ++ ! Setup a sentinel value for frame pointer in case we're using ++ ! FRAME_POINTERS for stack tracing. ++ mov #-1,r14 + +- ! call exit +- mov r0,r4 +- mov.l exit_k,r0 ++ ! Jump to the kernel main ++ mov.l main_addr,r0 + jsr @r0 + nop +- +- .balign 4 +-#ifdef PROFILE +-stop_profiling: +- # stop mcount counting +- mov.l profiling_enabled_k2, r0 +- mov #0, r1 +- mov.l r1, @r0 +- +- # call mcleanup +- mov.l mcleanup_k, r0 +- jmp @r0 +- nop +- +- .balign 4 +-mcleanup_k: +- .long __mcleanup +-monstartup_k: +- .long ___monstartup +-profiling_enabled_k2: +- .long profiling_enabled +-start_k: +- .long _start +-etext_k: +- .long __etext +-#endif /* PROFILE */ + +- .align 2 +-#if defined (__SH_FPU_ANY__) +-set_fpscr_k: +- .long ___set_fpscr +-#endif /* defined (__SH_FPU_ANY__) */ ++ ! Program can return here (not likely) or jump here directly ++ ! from anywhere in it to go straight back to the monitor ++_arch_real_exit: ++ ! Reset SR ++ mov.l old_sr,r0 ++ ldc r0,sr ++ ++ ! Disable MMU, invalidate TLB ++ mov #4,r0 ++ mov.l mmu_addr,r1 ++ mov.l r0,@r1 ++ ++ ! Wait (just in case) ++ nop ! 1 ++ nop ! 2 ++ nop ! 3 ++ nop ! 4 ++ nop ! 5 ++ nop ! 6 ++ nop ! 7 ++ nop ! 8 ++ ++ ! Restore VBR ++ mov.l old_vbr,r0 ++ ldc r0,vbr + +-stack_k: +- .long _stack +-edata_k: +- .long _edata +-end_k: +- .long _end +-main_k: +- .long ___setup_argv_and_call_main +-exit_k: +- .long _exit +-atexit_k: +- .long _atexit +-init_k: +- .long _init +-fini_k: +- .long _fini +-#ifdef VBR_SETUP +-old_vbr_k: +- .long old_vbr +-vbr_start_k: +- .long vbr_start +-#endif /* VBR_SETUP */ +- +-sr_initial_rtos: +- ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work. +- ! Whether profiling or not, keep interrupts masked, +- ! the RTOS will enable these if required. +- .long 0x600000f1 +- +-rtos_start_fn: +- .long ___rtos_profiler_start_timer +- +-#ifdef PROFILE +-sr_initial_bare: +- ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work. +- ! For bare machine, we need to enable interrupts to get profiling working +- .long 0x60000001 +-#else ++ ! If we're working under dcload, call its EXIT syscall ++ mov.l dcload_magic_addr,r0 ++ mov.l @r0,r0 ++ mov.l dcload_magic_value,r1 ++ cmp/eq r0,r1 ++ bf normal_exit + +-sr_initial_bare: +- ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work. +- ! Keep interrupts disabled - the application will enable as required. +- .long 0x600000f1 +-#endif ++ mov.l dcload_syscall,r0 ++ mov.l @r0,r0 ++ jsr @r0 ++ mov #15,r4 + +- ! supplied for backward compatibility only, in case of linking +- ! code whose main() was compiled with an older version of GCC. +- .global ___main +-___main: ++ ! Set back the stack and return (presumably to a serial debug) ++normal_exit: ++ mov.l old_stack,r15 ++ lds.l @r15+,pr + rts + nop +-#ifdef VBR_SETUP +-! Exception handlers +- .section .text.vbr, "ax" +-vbr_start: +- +- .org 0x100 +-vbr_100: +-#ifdef PROFILE +- ! Note on register usage. +- ! we use r0..r3 as scratch in this code. If we are here due to a trapa for profiling +- ! then this is OK as we are just before executing any function code. +- ! The other r4..r7 we save explicityl on the stack +- ! Remaining registers are saved by normal ABI conventions and we assert we do not +- ! use floating point registers. 
+- mov.l expevt_k1, r1 +- mov.l @r1, r1 +- mov.l event_mask, r0 +- and r0,r1 +- mov.l trapcode_k, r2 +- cmp/eq r1,r2 +- bt 1f +- bra handler_100 ! if not a trapa, go to default handler +- nop +-1: +- mov.l trapa_k, r0 +- mov.l @r0, r0 +- shlr2 r0 ! trapa code is shifted by 2. +- cmp/eq #33, r0 +- bt 2f +- bra handler_100 +- nop +-2: +- +- ! If here then it looks like we have trap #33 +- ! Now we need to call mcount with the following convention +- ! Save and restore r4..r7 +- mov.l r4,@-r15 +- mov.l r5,@-r15 +- mov.l r6,@-r15 +- mov.l r7,@-r15 +- sts.l pr,@-r15 +- +- ! r4 is frompc. +- ! r5 is selfpc +- ! r0 is the branch back address. +- ! The code sequence emitted by gcc for the profiling trap is +- ! .align 2 +- ! trapa #33 +- ! .align 2 +- ! .long lab Where lab is planted by the compiler. This is the address +- ! of a datum that needs to be incremented. +- sts pr, r4 ! frompc +- stc spc, r5 ! selfpc +- mov #2, r2 +- not r2, r2 ! pattern to align to 4 +- and r2, r5 ! r5 now has aligned address +-! add #4, r5 ! r5 now has address of address +- mov r5, r2 ! Remember it. +-! mov.l @r5, r5 ! r5 has value of lable (lab in above example) +- add #8, r2 +- ldc r2, spc ! our return address avoiding address word +- +- ! only call mcount if profiling is enabled +- mov.l profiling_enabled_k, r0 +- mov.l @r0, r0 +- cmp/eq #0, r0 +- bt 3f +- ! call mcount +- mov.l mcount_k, r2 +- jsr @r2 +- nop +-3: +- lds.l @r15+,pr +- mov.l @r15+,r7 +- mov.l @r15+,r6 +- mov.l @r15+,r5 +- mov.l @r15+,r4 +- rte +- nop +- .balign 4 +-event_mask: +- .long 0xfff +-trapcode_k: +- .long 0x160 +-expevt_k1: +- .long 0xff000024 ! Address of expevt +-trapa_k: +- .long 0xff000020 +-mcount_k: +- .long __call_mcount +-profiling_enabled_k: +- .long profiling_enabled +-#endif +- ! Non profiling case. +-handler_100: +- mov.l 2f, r0 ! load the old vbr setting (if any) +- mov.l @r0, r0 +- cmp/eq #0, r0 +- bf 1f +- ! no previous vbr - jump to own generic handler +- bra handler +- nop +-1: ! there was a previous handler - chain them +- add #0x7f, r0 ! 0x7f +- add #0x7f, r0 ! 0xfe +- add #0x2, r0 ! add 0x100 without corrupting another register +- jmp @r0 +- nop +- .balign 4 +-2: +- .long old_vbr + +- .org 0x400 +-vbr_400: ! Should be at vbr+0x400 +- mov.l 2f, r0 ! load the old vbr setting (if any) +- mov.l @r0, r0 +- cmp/eq #0, r0 +- ! no previous vbr - jump to own generic handler +- bt handler +- ! there was a previous handler - chain them +- rotcr r0 +- rotcr r0 +- add #0x7f, r0 ! 0x1fc +- add #0x7f, r0 ! 0x3f8 +- add #0x02, r0 ! 0x400 +- rotcl r0 +- rotcl r0 ! Add 0x400 without corrupting another register +- jmp @r0 +- nop +- .balign 4 +-2: +- .long old_vbr +-handler: +- /* If the trap handler is there call it */ +- mov.l superh_trap_handler_k, r0 +- cmp/eq #0, r0 ! True if zero. +- bf 3f +- bra chandler +- nop +-3: +- ! Here handler available, call it. +- /* Now call the trap handler with as much of the context unchanged as possible. +- Move trapping address into PR to make it look like the trap point */ +- stc spc, r1 +- lds r1, pr +- mov.l expevt_k, r4 +- mov.l @r4, r4 ! r4 is value of expevt, first parameter. +- mov r1, r5 ! Remember trapping pc. +- mov r1, r6 ! Remember trapping pc. +- mov.l chandler_k, r1 +- mov.l superh_trap_handler_k, r2 +- ! jmp to trap handler to avoid disturbing pr. +- jmp @r2 +- nop +- +- .org 0x600 +-vbr_600: +-#ifdef PROFILE +- ! Should be at vbr+0x600 +- ! Now we are in the land of interrupts so need to save more state. +- ! Save register state +- mov.l interrupt_stack_k, r15 ! 
r15 has been saved to sgr. +- mov.l r0,@-r15 +- mov.l r1,@-r15 +- mov.l r2,@-r15 +- mov.l r3,@-r15 +- mov.l r4,@-r15 +- mov.l r5,@-r15 +- mov.l r6,@-r15 +- mov.l r7,@-r15 +- sts.l pr,@-r15 +- sts.l mach,@-r15 +- sts.l macl,@-r15 +-#if defined(__SH_FPU_ANY__) +- ! Save fpul and fpscr, save fr0-fr7 in 64 bit mode +- ! and set the pervading precision for the timer_handler +- mov #0,r0 +- sts.l fpul,@-r15 +- sts.l fpscr,@-r15 +- lds r0,fpscr ! Clear fpscr +- fmov fr0,@-r15 +- fmov fr1,@-r15 +- fmov fr2,@-r15 +- fmov fr3,@-r15 +- mov.l pervading_precision_k,r0 +- fmov fr4,@-r15 +- fmov fr5,@-r15 +- mov.l @r0,r0 +- fmov fr6,@-r15 +- fmov fr7,@-r15 +- lds r0,fpscr +-#endif /* __SH_FPU_ANY__ */ +- ! Pass interrupted pc to timer_handler as first parameter (r4). +- stc spc, r4 +- mov.l timer_handler_k, r0 +- jsr @r0 +- nop +-#if defined(__SH_FPU_ANY__) +- mov #0,r0 +- lds r0,fpscr ! Clear the fpscr +- fmov @r15+,fr7 +- fmov @r15+,fr6 +- fmov @r15+,fr5 +- fmov @r15+,fr4 +- fmov @r15+,fr3 +- fmov @r15+,fr2 +- fmov @r15+,fr1 +- fmov @r15+,fr0 +- lds.l @r15+,fpscr +- lds.l @r15+,fpul +-#endif /* __SH_FPU_ANY__ */ +- lds.l @r15+,macl +- lds.l @r15+,mach +- lds.l @r15+,pr +- mov.l @r15+,r7 +- mov.l @r15+,r6 +- mov.l @r15+,r5 +- mov.l @r15+,r4 +- mov.l @r15+,r3 +- mov.l @r15+,r2 +- mov.l @r15+,r1 +- mov.l @r15+,r0 +- stc sgr, r15 ! Restore r15, destroyed by this sequence. +- rte +- nop +-#if defined(__SH_FPU_ANY__) +- .balign 4 +-pervading_precision_k: +-#define CONCAT1(A,B) A##B +-#define CONCAT(A,B) CONCAT1(A,B) +- .long CONCAT(__USER_LABEL_PREFIX__,__fpscr_values)+4 +-#endif +-#else +- mov.l 2f, r0 ! Load the old vbr setting (if any). +- mov.l @r0, r0 +- cmp/eq #0, r0 +- ! no previous vbr - jump to own handler +- bt chandler +- ! there was a previous handler - chain them +- rotcr r0 +- rotcr r0 +- add #0x7f, r0 ! 0x1fc +- add #0x7f, r0 ! 0x3f8 +- add #0x7f, r0 ! 0x5f4 +- add #0x03, r0 ! 0x600 +- rotcl r0 +- rotcl r0 ! Add 0x600 without corrupting another register +- jmp @r0 +- nop +- .balign 4 +-2: +- .long old_vbr +-#endif /* PROFILE code */ +-chandler: +- mov.l expevt_k, r4 +- mov.l @r4, r4 ! r4 is value of expevt hence making this the return code +- mov.l handler_exit_k,r0 +- jsr @r0 +- nop +- ! We should never return from _exit but in case we do we would enter the +- ! the following tight loop +-limbo: +- bra limbo +- nop +- .balign 4 +-#ifdef PROFILE +-interrupt_stack_k: +- .long __timer_stack ! The high end of the stack +-timer_handler_k: +- .long __profil_counter +-#endif +-expevt_k: +- .long 0xff000024 ! Address of expevt +-chandler_k: +- .long chandler +-superh_trap_handler_k: +- .long __superh_trap_handler +-handler_exit_k: +- .long _exit +- .align 2 +-! Simulated compile of trap handler. 
+- .section .debug_abbrev,"",@progbits +-.Ldebug_abbrev0: +- .section .debug_info,"",@progbits +-.Ldebug_info0: +- .section .debug_line,"",@progbits +-.Ldebug_line0: +- .text +-.Ltext0: +- .align 5 +- .type __superh_trap_handler,@function +-__superh_trap_handler: +-.LFB1: +- mov.l r14,@-r15 +-.LCFI0: +- add #-4,r15 +-.LCFI1: +- mov r15,r14 +-.LCFI2: +- mov.l r4,@r14 +- lds r1, pr +- add #4,r14 +- mov r14,r15 +- mov.l @r15+,r14 +- rts +- nop +-.LFE1: +-.Lfe1: +- .size __superh_trap_handler,.Lfe1-__superh_trap_handler +- .section .debug_frame,"",@progbits +-.Lframe0: +- .ualong .LECIE0-.LSCIE0 +-.LSCIE0: +- .ualong 0xffffffff +- .byte 0x1 +- .string "" +- .uleb128 0x1 +- .sleb128 -4 +- .byte 0x11 +- .byte 0xc +- .uleb128 0xf +- .uleb128 0x0 +- .align 2 +-.LECIE0: +-.LSFDE0: +- .ualong .LEFDE0-.LASFDE0 +-.LASFDE0: +- .ualong .Lframe0 +- .ualong .LFB1 +- .ualong .LFE1-.LFB1 +- .byte 0x4 +- .ualong .LCFI0-.LFB1 +- .byte 0xe +- .uleb128 0x4 +- .byte 0x4 +- .ualong .LCFI1-.LCFI0 +- .byte 0xe +- .uleb128 0x8 +- .byte 0x8e +- .uleb128 0x1 +- .byte 0x4 +- .ualong .LCFI2-.LCFI1 +- .byte 0xd +- .uleb128 0xe +- .align 2 +-.LEFDE0: +- .text +-.Letext0: +- .section .debug_info +- .ualong 0xb3 +- .uaword 0x2 +- .ualong .Ldebug_abbrev0 +- .byte 0x4 +- .uleb128 0x1 +- .ualong .Ldebug_line0 +- .ualong .Letext0 +- .ualong .Ltext0 +- .string "trap_handler.c" +- .string "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" +- .string "GNU C 3.2 20020529 (experimental)" +- .byte 0x1 +- .uleb128 0x2 +- .ualong 0xa6 +- .byte 0x1 +- .string "_superh_trap_handler" +- .byte 0x1 +- .byte 0x2 +- .byte 0x1 +- .ualong .LFB1 +- .ualong .LFE1 +- .byte 0x1 +- .byte 0x5e +- .uleb128 0x3 +- .string "trap_reason" +- .byte 0x1 +- .byte 0x1 +- .ualong 0xa6 +- .byte 0x2 +- .byte 0x91 +- .sleb128 0 +- .byte 0x0 +- .uleb128 0x4 +- .string "unsigned int" +- .byte 0x4 +- .byte 0x7 +- .byte 0x0 +- .section .debug_abbrev +- .uleb128 0x1 +- .uleb128 0x11 +- .byte 0x1 +- .uleb128 0x10 +- .uleb128 0x6 +- .uleb128 0x12 +- .uleb128 0x1 +- .uleb128 0x11 +- .uleb128 0x1 +- .uleb128 0x3 +- .uleb128 0x8 +- .uleb128 0x1b +- .uleb128 0x8 +- .uleb128 0x25 +- .uleb128 0x8 +- .uleb128 0x13 +- .uleb128 0xb +- .byte 0x0 +- .byte 0x0 +- .uleb128 0x2 +- .uleb128 0x2e +- .byte 0x1 +- .uleb128 0x1 +- .uleb128 0x13 +- .uleb128 0x3f +- .uleb128 0xc +- .uleb128 0x3 +- .uleb128 0x8 +- .uleb128 0x3a +- .uleb128 0xb +- .uleb128 0x3b +- .uleb128 0xb +- .uleb128 0x27 +- .uleb128 0xc +- .uleb128 0x11 +- .uleb128 0x1 +- .uleb128 0x12 +- .uleb128 0x1 +- .uleb128 0x40 +- .uleb128 0xa +- .byte 0x0 +- .byte 0x0 +- .uleb128 0x3 +- .uleb128 0x5 +- .byte 0x0 +- .uleb128 0x3 +- .uleb128 0x8 +- .uleb128 0x3a +- .uleb128 0xb +- .uleb128 0x3b +- .uleb128 0xb +- .uleb128 0x49 +- .uleb128 0x13 +- .uleb128 0x2 +- .uleb128 0xa +- .byte 0x0 +- .byte 0x0 +- .uleb128 0x4 +- .uleb128 0x24 +- .byte 0x0 +- .uleb128 0x3 +- .uleb128 0x8 +- .uleb128 0xb +- .uleb128 0xb +- .uleb128 0x3e +- .uleb128 0xb +- .byte 0x0 +- .byte 0x0 +- .byte 0x0 +- .section .debug_pubnames,"",@progbits +- .ualong 0x27 +- .uaword 0x2 +- .ualong .Ldebug_info0 +- .ualong 0xb7 +- .ualong 0x67 +- .string "_superh_trap_handler" +- .ualong 0x0 +- .section .debug_aranges,"",@progbits +- .ualong 0x1c +- .uaword 0x2 +- .ualong .Ldebug_info0 +- .byte 0x4 +- .byte 0x0 +- .uaword 0x0 +- .uaword 0x0 +- .ualong .Ltext0 +- .ualong .Letext0-.Ltext0 +- .ualong 0x0 +- .ualong 0x0 +-#endif /* VBR_SETUP */ +-#endif /* ! __SH5__ */ ++! 
Misc variables ++ .align 2 ++dcload_magic_addr: ++ .long 0x8c004004 ++dcload_magic_value: ++ .long 0xdeadbeef ++dcload_syscall: ++ .long 0x8c004008 ++__arch_old_sr: ++old_sr: ++ .long 0 ++__arch_old_vbr: ++old_vbr: ++ .long 0 ++__arch_old_fpscr: ++old_fpscr: ++ .long 0 ++init_sr: ++ .long 0x500000f0 ++old_sr_addr: ++ .long old_sr ++old_vbr_addr: ++ .long old_vbr ++old_fpscr_addr: ++ .long old_fpscr ++old_stack_addr: ++ .long old_stack ++__arch_old_stack: ++old_stack: ++ .long 0 ++new_stack: ++ .long 0x8d000000 ++p2_mask: ++ .long 0xa0000000 ++setup_cache_addr: ++ .long setup_cache ++init_addr: ++ .long init ++main_addr: ++ .long _arch_main ++mmu_addr: ++ .long 0xff000010 ++fpscr_addr: ++ .long ___set_fpscr ! in libgcc ++kos_init_flags_addr: ++ .long ___kos_init_flags ++ccr_addr: ++ .long 0xff00001c ++ccr_data: ++ .word 0x090d ++ccr_data_ocram: ++ .word 0x092d +diff -ruN gcc-4.5.2-orig/gcc/configure gcc-4.5.2/gcc/configure +--- gcc-4.5.2-orig/gcc/configure 2010-12-03 07:35:37.000000000 -0500 ++++ gcc-4.5.2/gcc/configure 2011-01-07 23:18:44.000000000 -0500 +@@ -10421,7 +10421,7 @@ + target_thread_file='single' + ;; + aix | dce | gnat | irix | posix | posix95 | rtems | \ +- single | solaris | vxworks | win32 | mipssde) ++ single | solaris | vxworks | win32 | mipssde | kos) + target_thread_file=${enable_threads} + ;; + *) +diff -ruN gcc-4.5.2-orig/gcc/gthr-kos.h gcc-4.5.2/gcc/gthr-kos.h +--- gcc-4.5.2-orig/gcc/gthr-kos.h 1969-12-31 19:00:00.000000000 -0500 ++++ gcc-4.5.2/gcc/gthr-kos.h 2011-01-08 23:30:52.000000000 -0500 +@@ -0,0 +1,388 @@ ++/* Copyright (C) 2009, 2010, 2011 Lawrence Sebald */ ++ ++/* Threads compatibility routines for libgcc2 and libobjc. */ ++/* Compile this one with gcc. */ ++/* Copyright (C) 1997, 1999, 2000, 2004, 2008, 2009 ++ Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++Under Section 7 of GPL version 3, you are granted additional ++permissions described in the GCC Runtime Library Exception, version ++3.1, as published by the Free Software Foundation. ++ ++You should have received a copy of the GNU General Public License and ++a copy of the GCC Runtime Library Exception along with this program; ++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++<http://www.gnu.org/licenses/>. */ ++ ++#ifndef GCC_GTHR_KOS_H ++#define GCC_GTHR_KOS_H ++ ++/* KallistiOS threads specific definitions. */ ++ ++#define __GTHREADS 1 ++#define __GTHREADS_CXX0X 1 ++#define __GTHREAD_HAS_COND 1 ++ ++#include <assert.h> ++#include <kos/thread.h> ++#include <kos/tls.h> ++#include <kos/mutex.h> ++#include <kos/recursive_lock.h> ++#include <kos/once.h> ++#include <kos/cond.h> ++#include <time.h> ++ ++/* These should work just fine. 
*/ ++typedef kthread_key_t __gthread_key_t; ++typedef kthread_once_t __gthread_once_t; ++typedef mutex_t * __gthread_mutex_t; ++typedef recursive_lock_t * __gthread_recursive_mutex_t; ++typedef condvar_t * __gthread_cond_t; ++typedef kthread_t * __gthread_t; ++typedef struct timespec __gthread_time_t; ++ ++static void __gthr_mutex_init(__gthread_mutex_t *__mutex) { ++ assert(__mutex); ++ *__mutex = mutex_create(); ++} ++ ++static void __gthr_recursive_mutex_init(__gthread_recursive_mutex_t *__mutex) { ++ assert(__mutex); ++ *__mutex = rlock_create(); ++} ++ ++static void __gthr_cond_init(__gthread_cond_t *__cond) { ++ assert(__cond); ++ *__cond = cond_create(); ++} ++ ++#define __GTHREAD_ONCE_INIT KTHREAD_ONCE_INIT ++#define __GTHREAD_MUTEX_INIT_FUNCTION __gthr_mutex_init ++#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthr_recursive_mutex_init ++#define __GTHREAD_COND_INIT_FUNCTION __gthr_cond_init ++ ++static inline int __gthread_active_p(void) { ++ return 1; ++} ++ ++#ifdef _LIBOBJC ++ ++/* This stuff only applies to Objective C. */ ++ ++/* The config.h file in libobjc/ */ ++#include <config.h> ++ ++/* Key structure for maintaining thread specific storage */ ++static kthread_key_t _objc_thread_storage; ++ ++/* Backend initialization funcitons */ ++ ++/* Initialize the threads subsystem. */ ++static inline int __gthread_objc_init_thread_system(void) { ++ /* The only thing we have to do is to initialize the storage key. */ ++ return kthread_key_create(&_objc_thread_storage, NULL); ++} ++ ++/* Close the threads subsystem. */ ++static inline int __gthread_objc_close_thread_system(void) { ++ return kthread_key_delete(_objc_thread_storage); ++} ++ ++/* Backend thread functions */ ++ ++/* Create a new thread of execution. */ ++static inline objc_thread_t __gthread_objc_thread_detach(void (*func)(void *), ++ void *arg) { ++ kthread_t *thd_hnd; ++ ++ thd_hnd = thd_create(1, (void *)(void *)func, arg); ++ return (objc_thread_t)thd_hnd; ++} ++ ++/* Set the current thread's priority. */ ++static inline int __gthread_objc_thread_set_priority(int priority __attribute__((unused))) { ++ /* XXXX */ ++ return -1; ++} ++ ++/* Return the current thread's priority. */ ++static inline int __gthread_objc_thread_get_priority(void) { ++ /* XXXX */ ++ return OBJC_THREAD_INTERACTIVE_PRIORITY; ++} ++ ++/* Yield our process time to another thread. */ ++static inline void __gthread_objc_thread_yield(void) { ++ thd_pass(); ++} ++ ++/* Terminate the current thread. */ ++static inline int __gthread_objc_thread_exit(void) { ++ thd_exit(NULL); ++ ++ /* Failed if we reached here */ ++ return -1; ++} ++ ++/* Returns an integer value which uniquely describes a thread. */ ++static inline objc_thread_t __gthread_objc_thread_id(void) { ++ return (objc_thread_t)thd_get_current(); ++} ++ ++/* Sets the thread's local storage pointer. */ ++static inline int __gthread_objc_thread_set_data(void *value) { ++ return kthread_setspecific(_objc_thread_storage, value); ++} ++ ++/* Returns the thread's local storage pointer. */ ++static inline void *__gthread_objc_thread_get_data(void) { ++ return kthread_getspecific(_objc_thread_storage); ++} ++ ++/* Backend mutex functions */ ++ ++/* Allocate a mutex. */ ++static inline int __gthread_objc_mutex_allocate(objc_mutex_t mutex) { ++ mutex_t *m = mutex_create(); ++ ++ if(m) { ++ mutex->backend = (void *)m; ++ return 0; ++ } ++ ++ mutex->backend = NULL; ++ return -1; ++} ++ ++/* Deallocate a mutex. 
*/ ++static inline int __gthread_objc_mutex_deallocate(objc_mutex_t mutex) { ++ mutex_t *m = (mutex_t *)mutex->backend; ++ ++ if(mutex_is_locked(m)) { ++ mutex_unlock(m); ++ } ++ ++ mutex_destroy(m); ++ mutex->backend = NULL; ++ ++ return 0; ++} ++ ++/* Grab a lock on a mutex. */ ++static inline int __gthread_objc_mutex_lock(objc_mutex_t mutex) { ++ return mutex_lock((mutex_t *)mutex->backend); ++} ++ ++/* Try to grab a lock on a mutex. */ ++static inline int __gthread_objc_mutex_trylock(objc_mutex_t mutex) { ++ return mutex_trylock((mutex_t *)mutex->backend); ++} ++ ++/* Unlock the mutex. */ ++static inline int __gthread_objc_mutex_unlock(objc_mutex_t mutex) { ++ mutex_unlock((mutex_t *)mutex->backend); ++ return 0; ++} ++ ++/* Backend condition mutex functions */ ++ ++/* Allocate a condition. */ ++static inline int __gthread_objc_condition_allocate(objc_condition_t cond) { ++ condvar_t *c = cond_create(); ++ ++ if(c) { ++ cond->backend = (void *)c; ++ return 0; ++ } ++ ++ cond->backend = NULL; ++ return -1; ++} ++ ++/* Deallocate a condition. */ ++static inline int __gthread_objc_condition_deallocate(objc_condition_t cond) { ++ cond_destroy((condvar_t *)cond->backend); ++ cond->backend = NULL; ++ return 0; ++} ++ ++/* Wait on the condition. */ ++static inline int __gthread_objc_condition_wait(objc_condition_t cond, ++ objc_mutex_t mutex) { ++ return cond_wait((condvar_t *)cond->backend, (mutex_t *)mutex->backend); ++} ++ ++/* Wake up all threads waiting on this condition. */ ++static inline int __gthread_objc_condition_broadcast(objc_condition_t cond) { ++ cond_broadcast((condvar_t *)cond->backend); ++ return 0; ++} ++ ++/* Wake up one thread waiting on this condition. */ ++static inline int __gthread_objc_condition_signal(objc_condition_t cond) { ++ cond_signal((condvar_t *)cond->backend); ++ return 0; ++} ++ ++#else /* _LIBOBJC */ ++ ++static inline int __gthread_once(__gthread_once_t *__once, ++ void (*__func)(void)) { ++ return kthread_once(__once, __func); ++} ++ ++static inline int __gthread_key_create(__gthread_key_t *__key, ++ void (*__func)(void *)) { ++ return kthread_key_create(__key, __func); ++} ++ ++static int __gthread_key_delete(__gthread_key_t __key) { ++ return kthread_key_delete(__key); ++} ++ ++static inline void *__gthread_getspecific(__gthread_key_t __key) { ++ return kthread_getspecific(__key); ++} ++ ++static inline int __gthread_setspecific(__gthread_key_t __key, ++ const void *__v) { ++ return kthread_setspecific(__key, __v); ++} ++ ++static inline int __gthread_mutex_destroy(__gthread_mutex_t *__mutex) { ++ assert(__mutex); ++ mutex_destroy(*__mutex); ++ return 0; ++} ++ ++static inline int __gthread_mutex_lock(__gthread_mutex_t *__mutex) { ++ assert(__mutex); ++ return mutex_lock(*__mutex); ++} ++ ++static inline int __gthread_mutex_trylock(__gthread_mutex_t *__mutex) { ++ assert(__mutex); ++ return mutex_trylock(*__mutex); ++} ++ ++static inline int __gthread_mutex_unlock(__gthread_mutex_t *__mutex) { ++ assert(__mutex); ++ mutex_unlock(*__mutex); ++ return 0; ++} ++ ++static inline int __gthread_recursive_mutex_lock(__gthread_recursive_mutex_t *__mutex) { ++ assert(__mutex); ++ return rlock_lock(*__mutex); ++} ++ ++static inline int __gthread_recursive_mutex_trylock(__gthread_recursive_mutex_t *__mutex) { ++ assert(__mutex); ++ return rlock_trylock(*__mutex); ++} ++ ++static inline int __gthread_recursive_mutex_unlock(__gthread_recursive_mutex_t *__mutex) { ++ assert(__mutex); ++ return rlock_unlock(*__mutex); ++} ++ ++static inline int 
__gthread_cond_broadcast(__gthread_cond_t *cond) { ++ assert(cond); ++ cond_broadcast(*cond); ++ return 0; ++} ++ ++static inline int __gthread_cond_wait(__gthread_cond_t *cond, __gthread_mutex_t *mutex) { ++ assert(cond); ++ return cond_wait(*cond, *mutex); ++} ++ ++static inline int __gthread_cond_wait_recursive(__gthread_cond_t *cond, ++ __gthread_recursive_mutex_t *mutex) { ++ assert(cond); ++ return cond_wait_recursive(*cond, *mutex); ++} ++ ++/* C++0x support functions */ ++ ++static inline int __gthread_create(__gthread_t *thd, void *(*func)(void *), ++ void *args) { ++ assert(thd); ++ *thd = thd_create(0, func, args); ++ return (*thd == NULL); ++} ++ ++static inline int __gthread_join(__gthread_t thd, void **value_ptr) { ++ assert(thd); ++ return thd_join(thd, value_ptr); ++} ++ ++static inline int __gthread_detach(__gthread_t thd) { ++ assert(thd); ++ return thd_detach(thd); ++} ++ ++static inline int __gthread_equal(__gthread_t t1, __gthread_t t2) { ++ return t1 == t2; ++} ++ ++static inline __gthread_t __gthread_self(void) { ++ return thd_get_current(); ++} ++ ++static inline int __gthread_yield(void) { ++ thd_pass(); ++ return 0; ++} ++ ++static inline int __gthread_mutex_timedlock(__gthread_mutex_t *m, ++ const __gthread_time_t *timeout) { ++ int t = (int)(timeout->tv_sec + (timeout->tv_nsec / 1000)); ++ assert(m); ++ return mutex_lock_timed(*m, t); ++} ++ ++static inline int __gthread_recursive_mutex_timedlock(__gthread_recursive_mutex_t *l, ++ const __gthread_time_t *timeout) { ++ int t = (int)(timeout->tv_sec + (timeout->tv_nsec / 1000)); ++ assert(l); ++ return rlock_lock_timed(*l, t); ++} ++ ++static inline int __gthread_cond_signal(__gthread_cond_t *cond) { ++ assert(cond); ++ cond_signal(*cond); ++ return 0; ++} ++ ++static inline int __gthread_cond_timedwait(__gthread_cond_t *cond, ++ __gthread_mutex_t *mutex, ++ const __gthread_time_t *timeout) { ++ int t = (int)(timeout->tv_sec + (timeout->tv_nsec / 1000)); ++ assert(cond); ++ return cond_wait_timed(*cond, *mutex, t); ++} ++ ++static inline int __gthread_cond_timedwait_recursive(__gthread_cond_t *cond, ++ __gthread_recursive_mutex_t *l, ++ const __gthread_time_t *timeout) { ++ int t = (int)(timeout->tv_sec + (timeout->tv_nsec / 1000)); ++ assert(cond); ++ return cond_wait_timed_recursive(*cond, *l, t); ++} ++ ++#endif /* _LIBOBJC */ ++ ++#endif /* ! 
GCC_GTHR_KOS_H */ Added: kos/utils/dc-chain/patches/newlib-1.19.0-kos.diff =================================================================== --- kos/utils/dc-chain/patches/newlib-1.19.0-kos.diff (rev 0) +++ kos/utils/dc-chain/patches/newlib-1.19.0-kos.diff 2011-01-09 04:53:55 UTC (rev 699) @@ -0,0 +1,487 @@ +diff -ruN newlib-1.19.0-orig/newlib/configure.host newlib-1.19.0/newlib/configure.host +--- newlib-1.19.0-orig/newlib/configure.host 2010-12-02 14:30:46.000000000 -0500 ++++ newlib-1.19.0/newlib/configure.host 2011-01-08 00:28:36.000000000 -0500 +@@ -234,6 +234,7 @@ + ;; + sh | sh64) + machine_dir=sh ++ newlib_cflags="${newlib_cflags} -DREENTRANT_SYSCALLS_PROVIDED -DMALLOC_PROVIDED -DABORT_PROVIDED -fno-crossjumping -fno-optimize-sibling-calls" + ;; + sparc*) + machine_dir=sparc +diff -ruN newlib-1.19.0-orig/newlib/libc/include/assert.h newlib-1.19.0/newlib/libc/include/assert.h +--- newlib-1.19.0-orig/newlib/libc/include/assert.h 2008-07-17 16:56:51.000000000 -0400 ++++ newlib-1.19.0/newlib/libc/include/assert.h 2011-01-08 01:26:09.000000000 -0500 +@@ -13,8 +13,8 @@ + #ifdef NDEBUG /* required by ANSI standard */ + # define assert(__e) ((void)0) + #else +-# define assert(__e) ((__e) ? (void)0 : __assert_func (__FILE__, __LINE__, \ +- __ASSERT_FUNC, #__e)) ++# define assert(__e) ((__e) ? (void)0 : __assert (__FILE__, __LINE__, \ ++ #__e, (char *)0, __ASSERT_FUNC)) + + # ifndef __ASSERT_FUNC + /* Use g++'s demangled names in C++. */ +@@ -36,10 +36,8 @@ + # endif /* !__ASSERT_FUNC */ + #endif /* !NDEBUG */ + +-void _EXFUN(__assert, (const char *, int, const char *) +- _ATTRIBUTE ((__noreturn__))); +-void _EXFUN(__assert_func, (const char *, int, const char *, const char *) +- _ATTRIBUTE ((__noreturn__))); ++void _EXFUN(__assert, (const char *, int, const char *, const char *, ++ const char *) _ATTRIBUTE ((__noreturn__))); + + #ifdef __cplusplus + } +diff -ruN newlib-1.19.0-orig/newlib/libc/include/sys/types.h newlib-1.19.0/newlib/libc/include/sys/types.h +--- newlib-1.19.0-orig/newlib/libc/include/sys/types.h 2010-12-08 09:44:06.000000000 -0500 ++++ newlib-1.19.0/newlib/libc/include/sys/types.h 2011-01-08 00:10:45.000000000 -0500 +@@ -282,7 +282,7 @@ + + #if defined(__XMK__) + typedef unsigned int pthread_t; /* identify a thread */ +-#else ++#elif !defined(_arch_dreamcast) + typedef __uint32_t pthread_t; /* identify a thread */ + #endif + +@@ -333,6 +333,7 @@ + + #endif /* defined(__XMK__) || defined(__rtems__) */ + ++#ifndef _arch_dreamcast + #if defined(__XMK__) + typedef struct pthread_attr_s { + int contentionscope; +@@ -366,6 +367,7 @@ + } pthread_attr_t; + + #endif /* !defined(__XMK__) */ ++#endif + + #if defined(_POSIX_THREAD_PROCESS_SHARED) + /* NOTE: P1003.1c/D10, p. 81 defines following values for process_shared. 
*/ +@@ -405,6 +407,7 @@ + } pthread_mutexattr_t; + + #else /* !defined(__XMK__) */ ++#ifndef _arch_dreamcast + typedef __uint32_t pthread_mutex_t; /* identify a mutex */ + + typedef struct { +@@ -421,10 +424,12 @@ + #endif + int recursive; + } pthread_mutexattr_t; ++#endif /* !_arch_dreamcast */ + #endif /* !defined(__XMK__) */ + + /* Condition Variables */ + ++#ifndef _arch_dreamcast + typedef __uint32_t pthread_cond_t; /* identify a condition variable */ + + typedef struct { +@@ -433,15 +438,18 @@ + int process_shared; /* allow this to be shared amongst processes */ + #endif + } pthread_condattr_t; /* a condition attribute object */ ++#endif + + /* Keys */ + ++#ifndef _arch_dreamcast + typedef __uint32_t pthread_key_t; /* thread-specific data keys */ + + typedef struct { + int is_initialized; /* is this structure initialized? */ + int init_executed; /* has the initialization routine been run? */ + } pthread_once_t; /* dynamic package initialization */ ++#endif + #else + #if defined (__CYGWIN__) + #include <cygwin/types.h> +diff -ruN newlib-1.19.0-orig/newlib/libc/stdlib/assert.c newlib-1.19.0/newlib/libc/stdlib/assert.c +--- newlib-1.19.0-orig/newlib/libc/stdlib/assert.c 2009-10-08 12:44:10.000000000 -0400 ++++ newlib-1.19.0/newlib/libc/stdlib/assert.c 2011-01-08 00:29:47.000000000 -0500 +@@ -47,6 +47,8 @@ + #include <stdlib.h> + #include <stdio.h> + ++#if 0 ++ + #ifndef HAVE_ASSERT_FUNC + /* func can be NULL, in which case no function information is given. */ + void +@@ -74,3 +76,4 @@ + __assert_func (file, line, NULL, failedexpr); + /* NOTREACHED */ + } ++#endif +diff -ruN newlib-1.19.0-orig/newlib/libc/sys/sh/ftruncate.c newlib-1.19.0/newlib/libc/sys/sh/ftruncate.c +--- newlib-1.19.0-orig/newlib/libc/sys/sh/ftruncate.c 2003-07-10 11:31:30.000000000 -0400 ++++ newlib-1.19.0/newlib/libc/sys/sh/ftruncate.c 2011-01-08 00:12:11.000000000 -0500 +@@ -2,8 +2,8 @@ + #include <sys/types.h> + #include "sys/syscall.h" + +-int ++/* int + ftruncate (int file, off_t length) + { + return __trap34 (SYS_ftruncate, file, length, 0); +-} ++} */ +diff -ruN newlib-1.19.0-orig/newlib/libc/sys/sh/sys/lock.h newlib-1.19.0/newlib/libc/sys/sh/sys/lock.h +--- newlib-1.19.0-orig/newlib/libc/sys/sh/sys/lock.h 1969-12-31 19:00:00.000000000 -0500 ++++ newlib-1.19.0/newlib/libc/sys/sh/sys/lock.h 2011-01-08 00:15:28.000000000 -0500 +@@ -0,0 +1,51 @@ ++/* KallistiOS ##version## ++ ++ lock_common.h ++ Copyright (C)2004 Dan Potter ++ ++*/ ++ ++#ifndef __SYS_LOCK_H__ ++#define __SYS_LOCK_H__ ++ ++typedef struct { ++ void * owner; ++ int nest; ++ volatile int lock; ++} __newlib_recursive_lock_t; ++ ++#define __NEWLIB_RECURSIVE_LOCK_INIT { (void *)0, 0, 0 } ++ ++typedef volatile int __newlib_lock_t; ++#define __NEWLIB_LOCK_INIT 0 ++ ++typedef __newlib_lock_t _LOCK_T; ++typedef __newlib_recursive_lock_t _LOCK_RECURSIVE_T; ++ ++#define __LOCK_INIT(class,lock) class _LOCK_T lock = __NEWLIB_LOCK_INIT; ++#define __LOCK_INIT_RECURSIVE(class,lock) class _LOCK_RECURSIVE_T lock = __NEWLIB_RECURSIVE_LOCK_INIT; ++#define __lock_init(lock) __newlib_lock_init(&(lock)) ++#define __lock_init_recursive(lock) __newlib_lock_init_recursive(&(lock)) ++#define __lock_close(lock) __newlib_lock_close(&(lock)) ++#define __lock_close_recursive(lock) __newlib_lock_close_recursive(&(lock)) ++#define __lock_acquire(lock) __newlib_lock_acquire(&(lock)) ++#define __lock_acquire_recursive(lock) __newlib_lock_acquire_recursive(&(lock)) ++#define __lock_try_acquire(lock) __newlib_lock_try_acquire(&(lock)) ++#define __lock_try_acquire_recursive(lock) 
__newlib_lock_try_acquire_recursive(&(lock)) ++#define __lock_release(lock) __newlib_lock_release(&(lock)) ++#define __lock_release_recursive(lock) __newlib_lock_release_recursive(&(lock)) ++ ++void __newlib_lock_init(__newlib_lock_t *); ++void __newlib_lock_close(__newlib_lock_t *); ++void __newlib_lock_acquire(__newlib_lock_t *); ++void __newlib_lock_try_acquire(__newlib_lock_t *); ++void __newlib_lock_release(__newlib_lock_t *); ++ ++void __newlib_lock_init_recursive(__newlib_recursive_lock_t *); ++void __newlib_lock_close_recursive(__newlib_recursive_lock_t *); ++void __newlib_lock_acquire_recursive(__newlib_recursive_lock_t *); ++void __newlib_lock_try_acquire_recursive(__newlib_recursive_lock_t *); ++void __newlib_lock_release_recursive(__newlib_recursive_lock_t *); ++ ++ ++#endif // __NEWLIB_LOCK_COMMON_H +diff -ruN newlib-1.19.0-orig/newlib/libc/sys/sh/syscalls.c newlib-1.19.0/newlib/libc/sys/sh/syscalls.c +--- newlib-1.19.0-orig/newlib/libc/sys/sh/syscalls.c 2008-01-21 19:24:45.000000000 -0500 ++++ newlib-1.19.0/newlib/libc/sys/sh/syscalls.c 2011-01-08 00:12:55.000000000 -0500 +@@ -1,228 +1,2 @@ +-#include <_ansi.h> +-#include <sys/types.h> +-#include <sys/stat.h> +-#include <sys/time.h> +-#include "sys/syscall.h" +-int errno; +- +-/* This is used by _sbrk. */ +-register char *stack_ptr asm ("r15"); +- +-int +-_read (int... [truncated message content] |
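A few quick sketches of what these patches wire up, for anyone skimming the diffs. These are illustrative only; any name that does not appear in the diffs above is invented for the example.

1) The new crt1.asm exit path probes for dcload before returning: it compares the word at 0x8c004004 against 0xdeadbeef and, if it matches, calls through the syscall vector at 0x8c004008 with request #15 ("mov #15,r4" in the assembly). A rough C equivalent of that check, with only the addresses and the request number taken from the diff:

    #include <stdint.h>

    #define DCLOAD_MAGIC_ADDR ((volatile uint32_t *)0x8c004004)
    #define DCLOAD_MAGIC      0xdeadbeef
    #define DCLOAD_SYSCALL    ((volatile uint32_t *)0x8c004008)

    typedef int (*dcload_fn_t)(int cmd, ...);

    static void dcload_exit_if_present(void) {
        /* dcload plants its magic value at a fixed address... */
        if(*DCLOAD_MAGIC_ADDR == DCLOAD_MAGIC) {
            /* ...and its entry point one word later. 15 = EXIT. */
            dcload_fn_t fn = (dcload_fn_t)(uintptr_t)*DCLOAD_SYSCALL;
            fn(15);
        }
        /* Otherwise control falls through to the normal return path. */
    }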
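2) gthr-kos.h is a thin veneer: each __gthread_* entry point forwards to a single KOS call. A minimal joinable-thread example using the underlying API that the shims call into, assuming the <kos/thread.h> and <kos/mutex.h> interfaces exactly as the patch includes them ("worker" and "counter" are invented for the demo):

    #include <kos/thread.h>
    #include <kos/mutex.h>

    static mutex_t *counter_lock;
    static int counter = 0;

    /* __gthread_mutex_lock(&m) is just mutex_lock(*m) underneath. */
    static void *worker(void *arg) {
        (void)arg;
        mutex_lock(counter_lock);
        counter++;
        mutex_unlock(counter_lock);
        return NULL;
    }

    int run_demo(void) {
        kthread_t *t1, *t2;

        counter_lock = mutex_create();    /* __GTHREAD_MUTEX_INIT_FUNCTION */

        t1 = thd_create(0, worker, NULL); /* __gthread_create(), joinable */
        t2 = thd_create(0, worker, NULL);

        thd_join(t1, NULL);               /* __gthread_join() */
        thd_join(t2, NULL);

        mutex_destroy(counter_lock);
        return counter;                   /* 2 on success */
    }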
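3) The replacement sys/lock.h reroutes newlib's internal locking macros onto KOS-side functions, which are only declared here; KOS itself is expected to supply the __newlib_lock_* implementations. Roughly what a use of those macros expands to:

    #include <sys/lock.h>

    /* Expands to: static _LOCK_T demo_lock = 0; */
    __LOCK_INIT(static, demo_lock);

    void demo_critical_section(void) {
        __lock_acquire(demo_lock);   /* __newlib_lock_acquire(&demo_lock) */
        /* ...whatever newlib internals need protecting... */
        __lock_release(demo_lock);   /* __newlib_lock_release(&demo_lock) */
    }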