diff --git a/graphics/openimageio/patches/patch-src_include_tbb_machine_gcc_generic_h b/graphics/openimageio/patches/patch-src_include_tbb_machine_gcc_generic_h new file mode 100644 index 00000000000..354f41b244d --- /dev/null +++ b/graphics/openimageio/patches/patch-src_include_tbb_machine_gcc_generic_h @@ -0,0 +1,131 @@ +$OpenBSD: patch-src_include_tbb_machine_gcc_generic_h,v 1.1 2012/06/13 11:32:39 pascal Exp $ +--- src/include/tbb/machine/gcc_generic.h.orig Mon Jun 11 11:57:42 2012 ++++ src/include/tbb/machine/gcc_generic.h Mon Jun 11 12:01:02 2012 +@@ -0,0 +1,127 @@ ++/* ++ Copyright 2005-2012 Intel Corporation. All Rights Reserved. ++ ++ This file is part of Threading Building Blocks. ++ ++ Threading Building Blocks is free software; you can redistribute it ++ and/or modify it under the terms of the GNU General Public License ++ version 2 as published by the Free Software Foundation. ++ ++ Threading Building Blocks is distributed in the hope that it will be ++ useful, but WITHOUT ANY WARRANTY; without even the implied warranty ++ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with Threading Building Blocks; if not, write to the Free Software ++ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ++ ++ As a special exception, you may use this file as part of a free software ++ library without restriction. Specifically, if other files instantiate ++ templates or use macros or inline functions from this file, or you compile ++ this file and link it with other files to produce an executable, this ++ file does not by itself cause the resulting executable to be covered by ++ the GNU General Public License. This exception does not however ++ invalidate any other reasons why the executable file might be covered by ++ the GNU General Public License. 
++*/ ++ ++#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H) ++#error Do not #include this internal file directly; use public TBB headers instead. ++#endif ++ ++#define __TBB_machine_gcc_generic_H ++ ++#include <stdint.h> ++#include <unistd.h> ++ ++#include <sched.h> ++ ++#define __TBB_WORDSIZE __SIZEOF_POINTER__ ++ ++// For some reason straight mapping does not work on mingw ++#if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__ ++ #define __TBB_BIG_ENDIAN 0 ++#elif __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__ ++ #define __TBB_BIG_ENDIAN 1 ++#else ++#error Unsupported endianness ++#endif ++ ++/** As this generic implementation has absolutely no information about underlying ++ hardware, its performance most likely will be sub-optimal because of full memory ++ fence usages where a more lightweight synchronization means (or none at all) ++ could suffice. Thus if you use this header to enable TBB on a new platform, ++ consider forking it and relaxing below helpers as appropriate. **/ ++#define __TBB_acquire_consistency_helper() __sync_synchronize() ++#define __TBB_release_consistency_helper() __sync_synchronize() ++#define __TBB_full_memory_fence() __sync_synchronize() ++#define __TBB_control_consistency_helper() __sync_synchronize() ++ ++#define __TBB_MACHINE_DEFINE_ATOMICS(S,T) \ ++inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ ++ return __sync_val_compare_and_swap(reinterpret_cast<volatile T *>(ptr),comparand,value); \ ++} \ ++ \ ++inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ ++ return __sync_fetch_and_add(reinterpret_cast<volatile T *>(ptr),value); \ ++} \ ++ ++__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t) ++__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t) ++__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t) ++__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t) ++ ++#undef __TBB_MACHINE_DEFINE_ATOMICS ++ ++namespace tbb{ namespace internal { namespace gcc_builtins { ++ int clz(unsigned int x){ return __builtin_clz(x);}; ++ int clz(unsigned long int x){ return __builtin_clzl(x);}; ++}}} ++//gcc
__builtin_clz builtin count _number_ of leading zeroes ++static inline intptr_t __TBB_machine_lg( uintptr_t x ) { ++ return sizeof(x)*8 - tbb::internal::gcc_builtins::clz(x) -1 ; ++} ++ ++static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) { ++ __sync_fetch_and_or(reinterpret_cast<volatile uintptr_t *>(ptr),addend); ++} ++ ++static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) { ++ __sync_fetch_and_and(reinterpret_cast<volatile uintptr_t *>(ptr),addend); ++} ++ ++ ++typedef unsigned char __TBB_Flag; ++ ++typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; ++ ++inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) { ++ return __sync_lock_test_and_set(&flag,1)==0; ++} ++ ++inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag , __TBB_Flag) { ++ __sync_lock_release(&flag); ++} ++ ++// Machine specific atomic operations ++#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) ++#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) ++ ++#define __TBB_TryLockByte __TBB_machine_try_lock_byte ++#define __TBB_UnlockByte __TBB_machine_unlock_byte ++ ++// Definition of other functions ++#define __TBB_Log2(V) __TBB_machine_lg(V) ++ ++#define __TBB_USE_GENERIC_FETCH_STORE 1 ++#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 ++#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 ++ ++#if __TBB_WORDSIZE==4 ++ #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 ++#endif ++ ++#define __TBB_Yield() sched_yield() ++#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) ++#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) diff --git a/graphics/openimageio/patches/patch-src_include_tbb_machine_sunos_sparc_h b/graphics/openimageio/patches/patch-src_include_tbb_machine_sunos_sparc_h new file mode 100644 index 00000000000..8f4df919668 --- /dev/null +++ b/graphics/openimageio/patches/patch-src_include_tbb_machine_sunos_sparc_h @@ -0,0 +1,217 @@ +$OpenBSD: patch-src_include_tbb_machine_sunos_sparc_h,v 1.1 2012/06/13 11:32:39 pascal Exp $ +---
src/include/tbb/machine/sunos_sparc.h.orig Mon Jun 11 11:57:42 2012 ++++ src/include/tbb/machine/sunos_sparc.h Mon Jun 11 12:00:47 2012 +@@ -0,0 +1,213 @@ ++/* ++ Copyright 2005-2012 Intel Corporation. All Rights Reserved. ++ ++ This file is part of Threading Building Blocks. ++ ++ Threading Building Blocks is free software; you can redistribute it ++ and/or modify it under the terms of the GNU General Public License ++ version 2 as published by the Free Software Foundation. ++ ++ Threading Building Blocks is distributed in the hope that it will be ++ useful, but WITHOUT ANY WARRANTY; without even the implied warranty ++ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with Threading Building Blocks; if not, write to the Free Software ++ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ++ ++ As a special exception, you may use this file as part of a free software ++ library without restriction. Specifically, if other files instantiate ++ templates or use macros or inline functions from this file, or you compile ++ this file and link it with other files to produce an executable, this ++ file does not by itself cause the resulting executable to be covered by ++ the GNU General Public License. This exception does not however ++ invalidate any other reasons why the executable file might be covered by ++ the GNU General Public License. ++*/ ++ ++ ++#if !defined(__TBB_machine_H) || defined(__TBB_machine_sunos_sparc_H) ++#error Do not #include this internal file directly; use public TBB headers instead. ++#endif ++ ++#define __TBB_machine_sunos_sparc_H ++ ++#include <stdint.h> ++#include <unistd.h> ++ ++#include <sched.h> ++ ++#define __TBB_WORDSIZE 8 ++#define __TBB_BIG_ENDIAN 1 ++ ++/** To those working on SPARC hardware. Consider relaxing acquire and release ++ consistency helpers to no-op (as this port covers TSO mode only).
**/ ++#define __TBB_compiler_fence() __asm__ __volatile__ ("": : :"memory") ++#define __TBB_control_consistency_helper() __TBB_compiler_fence() ++#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() ++#define __TBB_release_consistency_helper() __TBB_compiler_fence() ++#define __TBB_full_memory_fence() __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreStore|#StoreLoad": : : "memory") ++ ++//-------------------------------------------------- ++// Compare and swap ++//-------------------------------------------------- ++ ++/** ++ * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr ++ * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand ++ * @param value value to assign *ptr to if *ptr==comparand ++ * @param comparand value to compare with *ptr ++ ( @return value originally in memory at ptr, regardless of success ++*/ ++static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ){ ++ int32_t result; ++ __asm__ __volatile__( ++ "cas\t[%5],%4,%1" ++ : "=m"(*(int32_t *)ptr), "=r"(result) ++ : "m"(*(int32_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) ++ : "memory"); ++ return result; ++} ++ ++/** ++ * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr ++ * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand ++ * @param value value to assign *ptr to if *ptr==comparand ++ * @param comparand value to compare with *ptr ++ ( @return value originally in memory at ptr, regardless of success ++ */ ++static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ){ ++ int64_t result; ++ __asm__ __volatile__( ++ "casx\t[%5],%4,%1" ++ : "=m"(*(int64_t *)ptr), "=r"(result) ++ : "m"(*(int64_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) ++ : "memory"); ++ return result; ++} ++ ++//--------------------------------------------------- ++// Fetch and add 
++//--------------------------------------------------- ++ ++/** ++ * Atomic fetch and add for 32 bit values, in this case implemented by continuously checking success of atomicity ++ * @param ptr pointer to value to add addend to ++ * @param addened value to add to *ptr ++ * @return value at ptr before addened was added ++ */ ++static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend){ ++ int32_t result; ++ __asm__ __volatile__ ( ++ "0:\t add\t %3, %4, %0\n" // do addition ++ "\t cas\t [%2], %3, %0\n" // cas to store result in memory ++ "\t cmp\t %3, %0\n" // check if value from memory is original ++ "\t bne,a,pn\t %%icc, 0b\n" // if not try again ++ "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added ++ : "=&r"(result), "=m"(*(int32_t *)ptr) ++ : "r"(ptr), "r"(*(int32_t *)ptr), "r"(addend), "m"(*(int32_t *)ptr) ++ : "ccr", "memory"); ++ return result; ++} ++ ++/** ++ * Atomic fetch and add for 64 bit values, in this case implemented by continuously checking success of atomicity ++ * @param ptr pointer to value to add addend to ++ * @param addened value to add to *ptr ++ * @return value at ptr before addened was added ++ */ ++static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){ ++ int64_t result; ++ __asm__ __volatile__ ( ++ "0:\t add\t %3, %4, %0\n" // do addition ++ "\t casx\t [%2], %3, %0\n" // cas to store result in memory ++ "\t cmp\t %3, %0\n" // check if value from memory is original ++ "\t bne,a,pn\t %%xcc, 0b\n" // if not try again ++ "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added ++ : "=&r"(result), "=m"(*(int64_t *)ptr) ++ : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) ++ : "ccr", "memory"); ++ return result; ++} ++ ++//-------------------------------------------------------- ++// Logarithm (base two, integer) ++//-------------------------------------------------------- ++ ++static inline int64_t __TBB_machine_lg( 
uint64_t x ) { ++ uint64_t count; ++ // one hot encode ++ x |= (x >> 1); ++ x |= (x >> 2); ++ x |= (x >> 4); ++ x |= (x >> 8); ++ x |= (x >> 16); ++ x |= (x >> 32); ++ // count 1's ++ __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) ); ++ return count-1; ++} ++ ++//-------------------------------------------------------- ++ ++static inline void __TBB_machine_or( volatile void *ptr, uint64_t addend ) { ++ __asm__ __volatile__ ( ++ "0:\t or\t %2, %3, %%g1\n" // do addition ++ "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory ++ "\t cmp\t %2, %%g1\n" // check if value from memory is original ++ "\t bne,a,pn\t %%xcc, 0b\n" // if not try again ++ "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added ++ : "=m"(*(int64_t *)ptr) ++ : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) ++ : "ccr", "g1", "memory"); ++} ++ ++static inline void __TBB_machine_and( volatile void *ptr, uint64_t addend ) { ++ __asm__ __volatile__ ( ++ "0:\t and\t %2, %3, %%g1\n" // do addition ++ "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory ++ "\t cmp\t %2, %%g1\n" // check if value from memory is original ++ "\t bne,a,pn\t %%xcc, 0b\n" // if not try again ++ "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added ++ : "=m"(*(int64_t *)ptr) ++ : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) ++ : "ccr", "g1", "memory"); ++} ++ ++ ++static inline void __TBB_machine_pause( int32_t delay ) { ++ // do nothing, inlined, doesnt matter ++} ++ ++// put 0xff in memory location, return memory value, ++// generic trylockbyte puts 0x01, however this is fine ++// because all that matters is that 0 is unlocked ++static inline bool __TBB_machine_trylockbyte(unsigned char &flag){ ++ unsigned char result; ++ __asm__ __volatile__ ( ++ "ldstub\t [%2], %0\n" ++ : "=r"(result), "=m"(flag) ++ : "r"(&flag), "m"(flag) ++ : "memory"); ++ return result == 0; ++} ++ ++#define __TBB_USE_GENERIC_PART_WORD_CAS 
1 ++#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 ++#define __TBB_USE_GENERIC_FETCH_STORE 1 ++#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 ++#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 ++ ++#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) ++#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) ++ ++// Definition of other functions ++#define __TBB_Pause(V) __TBB_machine_pause(V) ++#define __TBB_Log2(V) __TBB_machine_lg(V) ++ ++#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) ++ ++#define __TBB_Yield() sched_yield() ++#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) ++#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) diff --git a/graphics/openimageio/patches/patch-src_include_tbb_tbb_machine_h b/graphics/openimageio/patches/patch-src_include_tbb_tbb_machine_h index bef9ee39652..3dea7a01b8b 100644 --- a/graphics/openimageio/patches/patch-src_include_tbb_tbb_machine_h +++ b/graphics/openimageio/patches/patch-src_include_tbb_tbb_machine_h @@ -1,6 +1,6 @@ -$OpenBSD: patch-src_include_tbb_tbb_machine_h,v 1.2 2012/05/26 11:01:55 pascal Exp $ +$OpenBSD: patch-src_include_tbb_tbb_machine_h,v 1.3 2012/06/13 11:32:39 pascal Exp $ --- src/include/tbb/tbb_machine.h.orig Wed May 2 23:01:55 2012 -+++ src/include/tbb/tbb_machine.h Sat May 26 11:01:25 2012 ++++ src/include/tbb/tbb_machine.h Sat May 26 17:38:14 2012 @@ -53,7 +53,7 @@ extern "C" __declspec(dllimport) int __stdcall SwitchT #pragma managed(pop) #endif @@ -10,12 +10,16 @@ $OpenBSD: patch-src_include_tbb_tbb_machine_h,v 1.2 2012/05/26 11:01:55 pascal E #if __i386__ #include "machine/linux_ia32.h" -@@ -61,6 +61,8 @@ extern "C" __declspec(dllimport) int __stdcall SwitchT +@@ -61,6 +61,12 @@ extern "C" __declspec(dllimport) int __stdcall SwitchT #include "machine/linux_intel64.h" #elif __ia64__ #include "machine/linux_ia64.h" +#elif __powerpc__ +#include "machine/mac_ppc.h" ++#elif __sparc64__ ++#include "machine/sunos_sparc.h" ++#else ++#include "machine/gcc_generic.h" 
#endif #elif __APPLE__