Cross platform compilation, deprecated function & type casting
ywgATustcbbs opened this issue · comments
I simply downloaded your code and converted it to a VS2017 solution. There are errors and warnings. I am using VS as my IDE, so I could submit a PR; I will write my changes here.
-
zconf.h is actually not used in this code, so it can be removed. What is actually used is time.h. Including time.h directly is friendlier for cross-platform compilation.
-
Use #ifdef instead of #if defined to improve the cross-platform experience. At the beginning of layers.h:
/* SIMD dispatch: test the compiler-predefined feature macros
 * (__AVX2__ / __SSE4_2__, set by -mavx2 / -msse4.2 on GCC/Clang and by
 * /arch:AVX2 on MSVC) so these guards agree with the #ifdef __AVX2__ /
 * __SSE4_2__ blocks around the intrinsic functions below; bare AVX2 /
 * SSE4_2 are not defined by any compiler. */
#ifdef USE_SIMD
#ifdef __AVX2__
#include <emmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>
//#warning "Using AVX2 extensions."
#endif
#ifdef __SSE4_2__
#include <emmintrin.h>
#include <smmintrin.h>
//#warning "Using SSE4.2 extensions."
#endif
#else
/* #warning is not supported by MSVC (it became standard only in C23);
 * use #pragma message there so the Windows build does not break. */
#ifdef _MSC_VER
#pragma message("Using no SIMD extensions.")
#else
#warning "Using no SIMD extensions."
#endif
#endif
and another place in that file:
#ifdef USE_SIMD
#ifdef __AVX2__
/* Initialise 8 chunk seeds at once (AVX2, one per 32-bit lane).
 * Vector analogue of the scalar chunk-seed setup: starting from the world
 * seed ws, four rounds of out = out*(out*1284865837 + 4150755663) + c,
 * with c alternating between the lane's x and z coordinate.
 * Note: 4150755663 does not fit in a signed int; the explicit cast keeps
 * the intended two's-complement bit pattern and silences MSVC C4309. */
static inline __m256i set8ChunkSeeds(int ws, __m256i xs, __m256i zs)
{
    __m256i _out = _mm256_set1_epi32(ws);
    __m256i _mul = _mm256_set1_epi32(1284865837);
    __m256i _add = _mm256_set1_epi32((int)4150755663u);
    _out = _mm256_add_epi32(xs, _mm256_mullo_epi32(_out, _mm256_add_epi32(_add, _mm256_mullo_epi32(_out, _mul))));
    _out = _mm256_add_epi32(zs, _mm256_mullo_epi32(_out, _mm256_add_epi32(_add, _mm256_mullo_epi32(_out, _mul))));
    _out = _mm256_add_epi32(xs, _mm256_mullo_epi32(_out, _mm256_add_epi32(_add, _mm256_mullo_epi32(_out, _mul))));
    return _mm256_add_epi32(zs, _mm256_mullo_epi32(_out, _mm256_add_epi32(_add, _mm256_mullo_epi32(_out, _mul))));
}
/* Draw the next pseudo-random value in each of 8 lanes (AVX2).
 * Returns (cs >> 24) & mask per lane, then advances the chunk seeds:
 * cs = cs*(cs*1284865837 + 4150755663) + ws.
 * The final cmpgt/and/add adds `mask` to any lane whose masked value is
 * negative, mirroring the scalar mcNextInt's sign correction.
 * Fixes vs. original: removed a stray double semicolon; cast the constant
 * 4150755663 (which overflows int) to keep the intended bit pattern and
 * silence MSVC C4309. */
static inline __m256i mc8NextInt(__m256i* cs, int ws, int mask)
{
    __m256i _and = _mm256_set1_epi32(mask);
    __m256i _ret = _mm256_and_si256(_and, _mm256_srli_epi32(*cs, 24));
    *cs = _mm256_add_epi32(_mm256_set1_epi32(ws), _mm256_mullo_epi32(*cs, _mm256_add_epi32(_mm256_set1_epi32((int)4150755663u), _mm256_mullo_epi32(*cs, _mm256_set1_epi32(1284865837)))));
    return _mm256_add_epi32(_ret, _mm256_and_si256(_and, _mm256_cmpgt_epi32(_mm256_set1_epi32(0), _ret)));
}
/* Per-lane choice between a1 and a2 driven by one PRNG bit:
 * lanes where the bit is 0 take a1, others take a2. */
static inline __m256i select8Random2(__m256i* cs, int ws, __m256i a1, __m256i a2)
{
    __m256i pickA1 = _mm256_cmpeq_epi32(mc8NextInt(cs, ws, 0x1), _mm256_set1_epi32(0));
    __m256i fromA1 = _mm256_and_si256(pickA1, a1);
    __m256i fromA2 = _mm256_andnot_si256(pickA1, a2);
    return _mm256_or_si256(fromA1, fromA2);
}
/* Per-lane 1-of-4 choice driven by two PRNG bits (value in 0..3):
 * 0 -> a1, 1 -> a2, 2 -> a3, 3 -> a4. */
static inline __m256i select8Random4(__m256i* cs, int ws, __m256i a1, __m256i a2, __m256i a3, __m256i a4)
{
    __m256i pick    = mc8NextInt(cs, ws, 0x3);
    __m256i two     = _mm256_set1_epi32(2);
    __m256i isZero  = _mm256_cmpeq_epi32(pick, _mm256_set1_epi32(0));
    __m256i isTwo   = _mm256_cmpeq_epi32(two, pick);
    __m256i lowHalf = _mm256_cmpgt_epi32(two, pick); /* pick < 2 */
    /* blend a1/a2 for pick in {0,1}, a3/a4 for pick in {2,3} */
    __m256i lo = _mm256_or_si256(_mm256_and_si256(isZero, a1), _mm256_andnot_si256(isZero, a2));
    __m256i hi = _mm256_or_si256(_mm256_and_si256(isTwo, a3), _mm256_andnot_si256(isTwo, a4));
    return _mm256_or_si256(_mm256_and_si256(lowHalf, lo), _mm256_andnot_si256(lowHalf, hi));
}
/* Per-lane "mode or random" selection (AVX2). With andnot(x,y) = ~x & y:
 *   _isa1 = (a1==a2 & a3!=a4) | (a1==a3 & a2!=a4) | (a1==a4 & a2!=a3)
 *   _isa2 = (a2==a3 & a1!=a4) | (a2==a4 & a1!=a3)
 *   _isa3 = (a3==a4 & a1!=a2)
 * i.e. the value that forms a strict majority pair wins; lanes with no
 * majority fall through to select8Random4.
 * NOTE: select8Random4 is evaluated for every call, so the PRNG state in
 * *cs advances identically whether or not its result is used. */
static inline __m256i select8ModeOrRandom(__m256i* cs, int ws, __m256i a1, __m256i a2, __m256i a3, __m256i a4)
{
/* pairwise equality masks: all-ones in lanes where the operands match */
__m256i _cmp1 = _mm256_cmpeq_epi32(a1, a2);
__m256i _cmp2 = _mm256_cmpeq_epi32(a1, a3);
__m256i _cmp3 = _mm256_cmpeq_epi32(a1, a4);
__m256i _cmp4 = _mm256_cmpeq_epi32(a2, a3);
__m256i _cmp5 = _mm256_cmpeq_epi32(a2, a4);
__m256i _cmp6 = _mm256_cmpeq_epi32(a3, a4);
__m256i _isa1 = _mm256_or_si256(
_mm256_andnot_si256(_cmp6, _cmp1),
_mm256_or_si256 (
_mm256_andnot_si256(_cmp5, _cmp2),
_mm256_andnot_si256(_cmp4, _cmp3)
)
);
__m256i _isa2 = _mm256_or_si256(
_mm256_andnot_si256(_cmp3, _cmp4),
_mm256_andnot_si256(_cmp2, _cmp5)
);
__m256i _isa3 = _mm256_andnot_si256(_cmp1, _cmp6);
/* lanes with no winner (~(_isa1|_isa2|_isa3)) take the random pick;
 * the rest take a1, a2 or a3 according to their winner mask */
return _mm256_or_si256(
_mm256_andnot_si256(
_mm256_or_si256(
_isa1,
_mm256_or_si256(_isa2, _isa3)
),
select8Random4(cs, ws, a1, a2, a3, a4)
),
_mm256_or_si256(
_mm256_and_si256(_isa1, a1),
_mm256_or_si256(
_mm256_and_si256(_isa2, a2),
_mm256_and_si256(_isa3, a3)
)
)
);
}
#endif /* __AVX2__ */
#ifdef __SSE4_2__
/* Initialise 4 chunk seeds at once (SSE4, one per 32-bit lane).
 * Same recurrence as the AVX2 variant: from the world seed ws, four
 * rounds of out = out*(out*1284865837 + 4150755663) + c, with c
 * alternating between the lane's x and z coordinate.
 * Note: 4150755663 does not fit in a signed int; the explicit cast keeps
 * the intended two's-complement bit pattern and silences MSVC C4309. */
static inline __m128i set4ChunkSeeds(int ws, __m128i xs, __m128i zs)
{
    __m128i out = _mm_set1_epi32(ws);
    __m128i mul = _mm_set1_epi32(1284865837);
    __m128i add = _mm_set1_epi32((int)4150755663u);
    out = _mm_add_epi32(xs, _mm_mullo_epi32(out, _mm_add_epi32(add, _mm_mullo_epi32(out, mul))));
    out = _mm_add_epi32(zs, _mm_mullo_epi32(out, _mm_add_epi32(add, _mm_mullo_epi32(out, mul))));
    out = _mm_add_epi32(xs, _mm_mullo_epi32(out, _mm_add_epi32(add, _mm_mullo_epi32(out, mul))));
    return _mm_add_epi32(zs, _mm_mullo_epi32(out, _mm_add_epi32(add, _mm_mullo_epi32(out, mul))));
}
/* Draw the next pseudo-random value in each of 4 lanes (SSE4).
 * Returns (cs >> 24) & mask per lane, then advances the chunk seeds:
 * cs = cs*(cs*1284865837 + 4150755663) + ws.
 * The cmplt/and/add adds `mask` to any lane whose masked value is
 * negative, mirroring the scalar mcNextInt's sign correction.
 * Fixes vs. original: the local was named `and`, an alternative operator
 * keyword in C++ (<iso646.h>), which breaks C++ compilation — renamed
 * `msk`; removed a stray double semicolon; cast the constant 4150755663
 * (which overflows int) to silence MSVC C4309. */
static inline __m128i mc4NextInt(__m128i* cs, int ws, int mask)
{
    __m128i msk = _mm_set1_epi32(mask);
    __m128i ret = _mm_and_si128(msk, _mm_srli_epi32(*cs, 24));
    *cs = _mm_add_epi32(_mm_set1_epi32(ws), _mm_mullo_epi32(*cs, _mm_add_epi32(_mm_set1_epi32((int)4150755663u), _mm_mullo_epi32(*cs, _mm_set1_epi32(1284865837)))));
    return _mm_add_epi32(ret, _mm_and_si128(msk, _mm_cmplt_epi32(ret, _mm_set1_epi32(0))));
}
/* Per-lane choice between a1 and a2 driven by one PRNG bit:
 * lanes where the bit is 0 take a1, others take a2. */
static inline __m128i select4Random2(__m128i* cs, int ws, __m128i a1, __m128i a2)
{
    __m128i pickA1 = _mm_cmpeq_epi32(mc4NextInt(cs, ws, 0x1), _mm_set1_epi32(0));
    __m128i fromA1 = _mm_and_si128(pickA1, a1);
    __m128i fromA2 = _mm_andnot_si128(pickA1, a2);
    return _mm_or_si128(fromA1, fromA2);
}
/* Per-lane 1-of-4 choice driven by two PRNG bits (value in 0..3):
 * 0 -> a1, 1 -> a2, 2 -> a3, 3 -> a4. */
static inline __m128i select4Random4(__m128i* cs, int ws, __m128i a1, __m128i a2, __m128i a3, __m128i a4)
{
    __m128i pick    = mc4NextInt(cs, ws, 0x3);
    __m128i two     = _mm_set1_epi32(2);
    __m128i isZero  = _mm_cmpeq_epi32(pick, _mm_set1_epi32(0));
    __m128i isTwo   = _mm_cmpeq_epi32(pick, two);
    __m128i lowHalf = _mm_cmplt_epi32(pick, two); /* pick < 2 */
    /* blend a1/a2 for pick in {0,1}, a3/a4 for pick in {2,3} */
    __m128i lo = _mm_or_si128(_mm_and_si128(isZero, a1), _mm_andnot_si128(isZero, a2));
    __m128i hi = _mm_or_si128(_mm_and_si128(isTwo, a3), _mm_andnot_si128(isTwo, a4));
    return _mm_or_si128(_mm_and_si128(lowHalf, lo), _mm_andnot_si128(lowHalf, hi));
}
/* Per-lane "mode or random" selection (SSE4). The value that forms a
 * strict majority pair wins; lanes with no majority fall through to
 * select4Random4.
 * NOTE: select4Random4 is evaluated for every call, so the PRNG state in
 * *cs advances identically whether or not its result is used. */
static inline __m128i select4ModeOrRandom(__m128i* cs, int ws, __m128i a1, __m128i a2, __m128i a3, __m128i a4)
{
//((a == b)&(c != d) | (a == c)&(b != d) | (a == d)&(b != c))&a | ((b == c)&(a != d) | (b == d)&(a != c))&b | ((c == d)&(a != b))&c
/* pairwise equality masks: all-ones in lanes where the operands match */
__m128i cmp1 = _mm_cmpeq_epi32(a1, a2);
__m128i cmp2 = _mm_cmpeq_epi32(a1, a3);
__m128i cmp3 = _mm_cmpeq_epi32(a1, a4);
__m128i cmp4 = _mm_cmpeq_epi32(a2, a3);
__m128i cmp5 = _mm_cmpeq_epi32(a2, a4);
__m128i cmp6 = _mm_cmpeq_epi32(a3, a4);
/* isa1/isa2/isa3: lanes where a1/a2/a3 respectively is the winner
 * (andnot(x,y) = ~x & y) */
__m128i isa1 = _mm_or_si128(
_mm_andnot_si128(cmp6, cmp1),
_mm_or_si128 (
_mm_andnot_si128(cmp5, cmp2),
_mm_andnot_si128(cmp4, cmp3)
)
);
__m128i isa2 = _mm_or_si128(
_mm_andnot_si128(cmp3, cmp4),
_mm_andnot_si128(cmp2, cmp5)
);
__m128i isa3 = _mm_andnot_si128(cmp1, cmp6);
/* lanes with no winner take the random pick; the rest take a1/a2/a3 */
return _mm_or_si128(
_mm_andnot_si128(
_mm_or_si128(
isa1,
_mm_or_si128(isa2, isa3)
),
select4Random4(cs, ws, a1, a2, a3, a4)
),
_mm_or_si128(
_mm_and_si128(isa1, a1),
_mm_or_si128(
_mm_and_si128(isa2, a2),
_mm_and_si128(isa3, a3)
)
)
);
}
#endif /* __SSE4_2__ */
#else
/* Scalar fallback: choose a1 or a2 with one PRNG draw from the layer. */
static inline int selectRandom2(Layer *l, int a1, int a2)
{
    return (mcNextInt(l, 2) == 0) ? a1 : a2;
}
/* Scalar fallback: choose one of a1..a4 with one PRNG draw (0..3). */
static inline int selectRandom4(Layer *l, int a1, int a2, int a3, int a4)
{
    switch (mcNextInt(l, 4))
    {
    case 0:  return a1;
    case 1:  return a2;
    case 2:  return a3;
    default: return a4;
    }
}
/* Scalar fallback: return the value that forms a majority among the four
 * arguments; with no majority, return a random pick.
 * The random draw happens FIRST and unconditionally so the PRNG state
 * advances the same way regardless of which branch is taken. The order
 * of the checks below is significant and must be preserved. */
static inline int selectModeOrRandom(Layer *l, int a1, int a2, int a3, int a4)
{
    const int fallback = selectRandom4(l, a1, a2, a3, a4);
    /* three- or four-of-a-kind */
    if (a2 == a3 && a3 == a4)      return a2;
    else if (a1 == a2 && a1 == a3) return a1;
    else if (a1 == a2 && a1 == a4) return a1;
    else if (a1 == a3 && a1 == a4) return a1;
    /* exactly one matching pair */
    else if (a1 == a2 && a3 != a4) return a1;
    else if (a1 == a3 && a2 != a4) return a1;
    else if (a1 == a4 && a2 != a3) return a1;
    else if (a2 == a3 && a1 != a4) return a2;
    else if (a2 == a4 && a1 != a3) return a2;
    else if (a3 == a4 && a1 != a2) return a3;
    /* no majority: random selection */
    return fallback;
}
#endif
Do note that I renamed some variables (and, add, mul) to avoid conflicts with C++ keywords when the code is compiled as C++.
-
Changed fopen to fopen_s; the same for printf and other functions that MSVC flags as deprecated.
-
Added explicit type casts.
-
In printf_s, change %ld to %lld, since the argument is a long long int.
Hi, this program was just pieced together, but I can certainly fix it properly; I am just surprised I didn't receive any mail about these issues at all.