#define NERV_GENERIC_CUKERNEL
#include "cumatrix.h"

#ifdef __NERV_FUTURE_CUDA_7
__device__ double atomicAdd_nvidia(double* address, double val) {
    /* NVIDIA provided this implementation;
       atomicAdd is not included in CUDA for double */
    unsigned long long int* address_as_ull =
        (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        /* reinterpret the double as 64-bit integer bits and retry the
           compare-and-swap until no other thread intervenes */
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val +
                                             __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

__device__ float atomicAdd_nvidia(float* address, float val) {
    /* NVIDIA provided this implementation.
       We tried the built-in atomicAdd, but the select_linear layer results
       were not reproducible; sadly, even with this implementation the
       select_linear layer results are still not reproducible */
    int* address_as_int = (int*)address;
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(val +
                                       __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}
#endif
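
/* Illustrative sketch: how a kernel might use atomicAdd_nvidia() to
 * accumulate per-thread values into a single double-precision result.
 * The kernel name and launch configuration below are hypothetical.
 *
 *   __global__ void cudak_example_sum(const double *a, double *sum, int n) {
 *       int i = blockIdx.x * blockDim.x + threadIdx.x;
 *       if (i < n)
 *           atomicAdd_nvidia(sum, a[i]);  // serialize concurrent updates
 *   }
 *
 *   // host side:
 *   // cudak_example_sum<<<(n + 255) / 256, 256>>>(d_a, d_sum, n);
 */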

/* Instantiate the generic kernels for single precision: cudak_(NAME) mangles
   each kernel name with the element type. */
#define cudak_(NAME) cudak_float_ ## NAME
#define MATRIX_USE_FLOAT
#include "generic/elem_type.h"
#include "generic/cukernel.cu"

/* Undo the per-type macros (set up by generic/elem_type.h) so they can be
   redefined for double. */
#undef cudak_
#undef MATRIX_USE_FLOAT
#undef MATRIX_ELEM
#undef MATRIX_ELEM_PTR
#undef MATRIX_ELEM_PTR_BASE
#undef MATRIX_ELEM_FMT
#undef MATRIX_ELEM_WRITE_FMT

/* Instantiate the generic kernels again for double precision. */
#define cudak_(NAME) cudak_double_ ## NAME
#define MATRIX_USE_DOUBLE
#include "generic/elem_type.h"
#include "generic/cukernel.cu"