#define __device__ __attribute__((device))
// Exercises the approximate base-2 exponential NVVM builtins on half and
// packed half2 operands. Guarded for sm_75+ via the visible
// __CUDA_ARCH__ >= 750 check; results are intentionally discarded
// (NOTE(review): this looks like a compiler codegen test — confirm).
__device__ void nvvm_ex2_sm75() {
#if __CUDA_ARCH__ >= 750
__nvvm_ex2_approx_f16(0.1f16);
__nvvm_ex2_approx_f16x2({0.1f16, 0.7f16});
#endif
}
// Exercises the f16/f16x2 fmin/fmax NVVM builtin family in every visible
// modifier combination: plain, ftz (flush-to-zero), nan (NaN-propagating),
// and ftz_nan, for both scalar half and packed half2 operands.
// Guarded for sm_80+ via the __CUDA_ARCH__ >= 800 check; results are
// intentionally discarded.
__device__ void nvvm_min_max_sm80() {
#if __CUDA_ARCH__ >= 800
// fmin: scalar half, then packed half2, each in all four modifier forms.
__nvvm_fmin_f16(0.1f16, 0.1f16);
__nvvm_fmin_ftz_f16(0.1f16, 0.1f16);
__nvvm_fmin_nan_f16(0.1f16, 0.1f16);
__nvvm_fmin_ftz_nan_f16(0.1f16, 0.1f16);
__nvvm_fmin_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmin_ftz_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmin_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmin_ftz_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
// fmax: same matrix of forms as fmin above.
__nvvm_fmax_f16(0.1f16, 0.1f16);
__nvvm_fmax_ftz_f16(0.1f16, 0.1f16);
__nvvm_fmax_nan_f16(0.1f16, 0.1f16);
__nvvm_fmax_ftz_nan_f16(0.1f16, 0.1f16);
__nvvm_fmax_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmax_ftz_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmax_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmax_ftz_nan_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
#endif
}
// Exercises the fma.rn relu-saturating NVVM builtins (plain and ftz) on
// half and packed half2 operands. These relu variants are placed in a
// separate function from the sm_53 fma forms below because they are
// guarded for sm_80+ via the __CUDA_ARCH__ >= 800 check. Results are
// intentionally discarded.
__device__ void nvvm_fma_f16_f16x2_sm80() {
#if __CUDA_ARCH__ >= 800
__nvvm_fma_rn_relu_f16(0.1f16, 0.1f16, 0.1f16);
__nvvm_fma_rn_ftz_relu_f16(0.1f16, 0.1f16, 0.1f16);
__nvvm_fma_rn_relu_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16},
{0.1f16, 0.7f16});
__nvvm_fma_rn_ftz_relu_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16},
{0.1f16, 0.7f16});
#endif
}
// Exercises the baseline fma.rn NVVM builtins on half and packed half2
// operands in four modifier combinations: plain, ftz, sat, and ftz_sat.
// Guarded for sm_53+ via the __CUDA_ARCH__ >= 530 check (the lowest
// architecture gate in this file); results are intentionally discarded.
__device__ void nvvm_fma_f16_f16x2_sm53() {
#if __CUDA_ARCH__ >= 530
// Scalar half forms.
__nvvm_fma_rn_f16(0.1f16, 0.1f16, 0.1f16);
__nvvm_fma_rn_ftz_f16(0.1f16, 0.1f16, 0.1f16);
__nvvm_fma_rn_sat_f16(0.1f16, 0.1f16, 0.1f16);
__nvvm_fma_rn_ftz_sat_f16(0.1f16, 0.1f16, 0.1f16);
// Packed half2 forms, same modifier matrix.
__nvvm_fma_rn_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16},
{0.1f16, 0.7f16});
__nvvm_fma_rn_ftz_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16},
{0.1f16, 0.7f16});
__nvvm_fma_rn_sat_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16},
{0.1f16, 0.7f16});
__nvvm_fma_rn_ftz_sat_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16},
{0.1f16, 0.7f16});
#endif
}
// Exercises the xorsign.abs variants of the f16/f16x2 fmin/fmax NVVM
// builtin family, again in all four modifier combinations (plain, ftz,
// nan, ftz_nan) for both scalar half and packed half2 operands. These
// are split from nvvm_min_max_sm80 because they carry a higher guard:
// sm_86+, via the visible __CUDA_ARCH__ >= 860 check. Results are
// intentionally discarded.
__device__ void nvvm_min_max_sm86() {
#if __CUDA_ARCH__ >= 860
// fmin xorsign.abs: scalar half, then packed half2.
__nvvm_fmin_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmin_ftz_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmin_nan_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmin_ftz_nan_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmin_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmin_ftz_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmin_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmin_ftz_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
// fmax xorsign.abs: same matrix of forms as fmin above.
__nvvm_fmax_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmax_ftz_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmax_nan_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmax_ftz_nan_xorsign_abs_f16(0.1f16, 0.1f16);
__nvvm_fmax_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmax_ftz_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmax_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
__nvvm_fmax_ftz_nan_xorsign_abs_f16x2({0.1f16, 0.7f16}, {0.1f16, 0.7f16});
#endif
}