fp16半精度浮点数转成float类型------C/C++

在深度学习算法模型推理时,会遇到fp16类型,但是我们的c语言中没有这种类型,直接转成unsigned short又会丧失精度,因此我们首先将FP16转成float类型,再进行计算。

方法1:

typedef unsigned short ushort;//占用2个字节 (holds the raw bits of an IEEE-754 half)
typedef unsigned int uint;    //占用4个字节 (holds the raw bits of an IEEE-754 float)

/* Reinterpret the bits of a float as a uint.
 * Uses memcpy instead of the original pointer cast *(uint*)&x:
 * dereferencing a float object through a uint* violates strict aliasing
 * (undefined behavior in C). memcpy is the well-defined way to type-pun
 * and modern compilers turn it into the same single register move. */
uint as_uint(const float x) {
    uint u;
    memcpy(&u, &x, sizeof u);
    return u;
}

/* Reinterpret the bits of a uint as a float (inverse of as_uint). */
float as_float(const uint x) {
    float f;
    memcpy(&f, &x, sizeof f);
    return f;
}

/* Convert IEEE-754 binary16 (half-precision) bits to a float.
 * Format: 1 sign bit, 5 exponent bits (bias 15), 10 mantissa bits;
 * range +-131008.0 down to +-5.9604645E-8, ~3.311 decimal digits.
 * Handles normals, subnormals and signed zero. Exponent 31 (Inf/NaN) is
 * NOT treated specially ("without infinity"): those inputs map to large
 * finite floats — use cpu_half2float below if Inf/NaN must be preserved.
 * Fixes vs. the original:
 *  - type punning done with memcpy (the pointer-cast helpers were
 *    strict-aliasing UB);
 *  - the sign term is computed in unsigned arithmetic: (x&0x8000)<<16 on
 *    a promoted int shifts into the sign bit, which is signed-overflow UB. */
float half_to_float(const unsigned short x) {
    const unsigned int e = (x & 0x7C00) >> 10; /* exponent field */
    const unsigned int m = (x & 0x03FF) << 13; /* mantissa moved to float's bit position */

    /* Evil log2 bit hack: let the FPU normalize m, then read back the float
     * exponent to count m's leading zeros (needed for subnormal inputs). */
    const float fm = (float)m;
    unsigned int v;
    memcpy(&v, &fm, sizeof v);
    v >>= 23;

    const unsigned int bits =
          ((unsigned int)(x & 0x8000u)) << 16                     /* sign */
        | (e != 0) * ((e + 112) << 23 | m)                        /* normalized */
        | ((e == 0) & (m != 0)) * ((v - 37) << 23 | ((m << (150 - v)) & 0x007FE000)); /* subnormal */

    float out;
    memcpy(&out, &bits, sizeof out);
    return out;
}
/* Convert a float to IEEE-754 binary16 (half-precision) bits.
 * Rounds to nearest by adding half a ULP of the 10-bit target mantissa
 * before truncating. Overflow (and Inf/NaN input) saturates to 0x7FFF
 * ("without infinity"); tiny values go through the subnormal path or
 * flush to zero.
 * Type punning uses memcpy instead of the original as_uint pointer cast,
 * which was strict-aliasing UB; the arithmetic is otherwise unchanged. */
unsigned short float_to_half(const float x) {
    unsigned int b;
    memcpy(&b, &x, sizeof b);
    b += 0x00001000; /* round-to-nearest-even: add last bit below the kept mantissa */

    const unsigned int e = (b & 0x7F800000) >> 23; /* float exponent (bias 127) */
    /* In the subnormal term below, 0x007FF000 = 0x00800000 - 0x00001000:
     * the implicit-one flag minus the initial rounding bias. */
    const unsigned int m = b & 0x007FFFFF;         /* float mantissa */

    return (b & 0x80000000) >> 16                                                /* sign */
        | (e > 112) * ((((e - 112) << 10) & 0x7C00) | m >> 13)                   /* normalized */
        | ((e < 113) & (e > 101)) * ((((0x007FF000 + m) >> (125 - e)) + 1) >> 1) /* subnormal */
        | (e > 143) * 0x7FFF;                                                    /* saturate */
}

方法2:

/* Convert IEEE-754 binary16 bits to a float (reference implementation).
 * Unlike half_to_float above, this version handles Inf and NaN explicitly
 * (any NaN becomes the positive quiet NaN 0x7FFFFFFF; Inf keeps its sign)
 * and normalizes subnormal inputs with a shift loop instead of a bit hack.
 * Fixes vs. the original:
 *  - the assembled bit pattern is kept in an unsigned int; storing
 *    (sign << 31) | ... into a signed int is implementation-defined when
 *    the sign bit is set;
 *  - the result is produced with memcpy instead of reading the int through
 *    a cast float* (strict-aliasing UB). */
float cpu_half2float(unsigned short x)
{
    unsigned sign = ((x >> 15) & 1);
    unsigned exponent = ((x >> 10) & 0x1f);
    unsigned mantissa = ((x & 0x3ff) << 13); /* half mantissa at float bit position */

    if (exponent == 0x1f) {  /* NaN or Inf */
        mantissa = (mantissa ? (sign = 0, 0x7fffff) : 0);
        exponent = 0xff;
    } else if (!exponent) {  /* Denorm or Zero */
        if (mantissa) {
            unsigned int msb;
            exponent = 0x71; /* 113; decremented once per shift while normalizing */
            do {
                msb = (mantissa & 0x400000);
                mantissa <<= 1;  /* normalize */
                --exponent;
            } while (!msb);
            mantissa &= 0x7fffff;  /* 1.mantissa is implicit */
        }
    } else {
        exponent += 0x70; /* rebias: 127 (float) - 15 (half) = 112 */
    }

    unsigned int bits = ((sign << 31) | (exponent << 23) | mantissa);
    float out;
    memcpy(&out, &bits, sizeof out);
    return out;
}

方法3:demo

 下面的demo中,yolov5_outputs[0].buf 是 void * 类型的,标准 C 不允许对 void * 做指针运算(++ 或下标访问),因此先转换成 ushort * 类型再逐元素读取。

    ...
    ...   
    float *data0 = (float*)malloc(4 * output_attrs[0].n_elems);
    float *data1 = (float*)malloc(4 * output_attrs[1].n_elems);
    float *data2 = (float*)malloc(4 * output_attrs[2].n_elems);
    unsigned short *temp0 = (ushort*)yolov5_outputs[0].buf;
    unsigned short *temp1 = (ushort*)yolov5_outputs[1].buf;
    unsigned short *temp2 = (ushort*)yolov5_outputs[2].buf;

    for(int i=0; i < output_attrs[0].n_elems;i++)
    {
        data0[i] = half_to_float(temp0[i]);
    }
    for(int i=0; i < output_attrs[1].n_elems;i++)
    {
       data1[i] = half_to_float(temp1[i]);
    }
    for(int i=0; i < output_attrs[2].n_elems;i++)
    {
       data2[i] = half_to_float(temp2[i]);
    }
    ...
    ...

参考文献:

https://github.com/PrincetonVision/marvin/blob/master/tools/tensorIO_matlab/half2float.cpp

猜你喜欢

转载自blog.csdn.net/u013171226/article/details/123503174
今日推荐