Java实现混合高斯聚类

用 Java 重新实现了混合高斯聚类。

写完测试用Vectors.dense( 1.0,2.0,3.0 ), Vectors.dense( 2.0,2.0,3.0 ),Vectors.dense( 3.0,2.0,3.0 )做测试,参数为 TestGaussianMixture gaussian = new TestGaussianMixture( 2, 0.01,2,10);然后看model中的weights。

测试结果与用 Spark 自带的 GaussianMixture 计算出的结果(5.5951692130668595E-8, 0.9999999440483078)不一致。

结果不一样,因为我机器不能调试scala代码,直接看,看了一遍又一遍,几天也不知道错在哪里。然后用反射一个一个替换写的方法,还是没有头绪。然后给GaussianMixture设置初始的model(和我们计算的一样),突然GaussianMixture计算结果就是5.5951692130668595E-8, 0.9999999440483078,问题就出在初始化的model上。

初始化就两个方法,都没有问题,但是当用反射的方法替代vectorMean 方法时,得到了正确的值。又仔细看了一遍,代码没有问题,是损失了精度。

原来的代码为

private static Vector vectorMean( List<Vector> list )
{
int size = list.size( );
Vector retValue = Vectors.zeros( list.get( 0 ).size( ) );
for ( Vector v : list )
{
BLAS.axpy( 1.0, v, retValue );
}
BLAS.scal( 1.0 / size, retValue );
return retValue;
}

在计算 scal 的时候,14 * (1.0 / 5) = 2.8000000000000003;而用反射调用 GaussianMixture 的这个方法时得到的就是 2.8。

然后改成

private static Vector vectorMean( List<Vector> list )
{
int size = list.size( );
Vector retValue = Vectors.zeros( list.get( 0 ).size( ) );
for ( Vector v : list )
{
BLAS.axpy( 1.0, v, retValue );
}
double[] ds = retValue.toArray( );
for (int i=0; i< ds.length; i++)
{
ds[i] = div( ds[i], size, 1 );
}
return retValue;
}

private static double div(double d1,double d2,int scale)
{  
        BigDecimal b1=new BigDecimal(Double.toString(d1));  
        BigDecimal b2=new BigDecimal(Double.toString(d2));  
        return b1.divide(b2,scale,BigDecimal.ROUND_HALF_UP).doubleValue();  
    }


结果就正确了。

过程几乎什么都怀疑了,但是问题就出在这个简单的方法上。


源代码如下


import java.io.Serializable;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;


import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.mllib.linalg.BLAS;
import org.apache.spark.mllib.linalg.DenseMatrix;
import org.apache.spark.mllib.linalg.Matrices;
import org.apache.spark.mllib.linalg.Matrix;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.stat.distribution.MultivariateGaussian;
import org.apache.spark.mllib.util.MLUtils$;
import org.apache.spark.rdd.RDD;


import scala.Array;
import scala.Tuple2;
import scala.Tuple3;
import scala.collection.JavaConverters;
import scala.reflect.ClassManifestFactory;
import scala.runtime.AbstractFunction0;
import scala.runtime.AbstractFunction1;
import scala.runtime.AbstractFunction2;


/**
 * 
 */


public class GaoGaussianMixture implements Serializable
{


private int k;
private double convergenceTol;
private int maxIterations;
private long seed;
private int nSamples = 5;

public GaoGaussianMixture()
{
this(2, 0.01, 100, new Random().nextInt( ));
}

public GaoGaussianMixture( int k, double convergenceTol, int maxIterations, long seed )
{
super( );
this.k = k;
this.convergenceTol = convergenceTol;
this.maxIterations = maxIterations;
this.seed = seed;
}


public GaoGaussianMixtureModel run( JavaRDD<Vector> data )
{
return run( data.rdd( ) );
}

public GaoGaussianMixtureModel run( RDD<Vector> data )
{
SparkContext sc = data.sparkContext( );
data.cache( );
int d = data.first( ).size( );
boolean shouldDistributeGaussians = shouldDistributeGaussians( k, d );


List<Vector> samples1 = data.toJavaRDD( ).takeSample( true, k
* nSamples, seed );

List<Vector> samples = new ArrayList<Vector>(  );
for (Vector v:samples1)
{
samples.add( v.copy( ) );
}
Double[] weights = (Double[]) Array.fill( k, new MyFunction0<Double>( )
{


@Override
public Double apply( )
{
return 1.0/k;
}
}, ClassManifestFactory.classType( Double.class ) );
MultivariateGaussian[] gaussians = (MultivariateGaussian[])Array.tabulate( k, new MyFunction1<Object, MultivariateGaussian>( )
{


@Override
public MultivariateGaussian apply( Object t )
{
int i = (int) t;
List<Vector> subList = samples.subList( i * nSamples, ( i + 1 )
* nSamples );
return new MultivariateGaussian( vectorMean0( subList ), initCovariance( subList ) );
}
}, ClassManifestFactory.classType( MultivariateGaussian.class ) );


double llh = -Double.MAX_VALUE;
double llhp = 0.0;
int iter = 0;
while ( iter < maxIterations
&& Math.abs( llh - llhp ) > convergenceTol )
{
Broadcast<Tuple2<Double[],MultivariateGaussian[] >> compute = sc.broadcast( new Tuple2<Double[],MultivariateGaussian[] >(weights, gaussians), ClassManifestFactory.classType( Tuple2.class ) );
ExpectationSum sums = data.toJavaRDD( ).aggregate( zero( k, d ), new Function2<ExpectationSum, Vector, ExpectationSum>( )
{


@Override
public ExpectationSum call( ExpectationSum v1, Vector v2 )
throws Exception
{
Tuple2<Double[],MultivariateGaussian[] > value = compute.value( );
return add( value._1, value._2, v1, v2 );
}
}, new  Function2<ExpectationSum, ExpectationSum, ExpectationSum>( )
{


@Override
public ExpectationSum call( ExpectationSum v1,
ExpectationSum v2 ) throws Exception
{
return combExpectationSum( v1, v2 );
}
});
double sumWeights = caleArraySum( sums.weights );

if (shouldDistributeGaussians)
{
int numPartitions = Math.min( k, 1024 );
List<Tuple3<Vector, Matrix, Double>> list = new ArrayList<Tuple3<Vector, Matrix, Double>>();
for (int i=0; i<sums.k; i++)
{
list.add( new Tuple3<Vector, Matrix, Double>(sums.means[i], sums.sigmas[i], sums.weights[i]) );
}

List<Tuple2<Double, MultivariateGaussian>> collectList = sc.parallelize( JavaConverters.asScalaIterableConverter( list ).asScala( ).toSeq( ), numPartitions, 
ClassManifestFactory.classType( Tuple3.class ) ).toJavaRDD( ).map( s-> updateWeightsAndGaussians(
s._1( ), s._2( ), s._3( ), sumWeights )).collect( );

for (int i=0; i<collectList.size( ); i++)
{
Tuple2<Double, MultivariateGaussian> value = collectList.get( i );
weights[i] = value._1;
gaussians[i] = value._2;
}

}
else
{
for (int i=0; i<k; i++)
{
Tuple2<Double, MultivariateGaussian> value = updateWeightsAndGaussians(sums.means[i], sums.sigmas[i], sums.weights[i], sumWeights);
weights[i] = value._1;
gaussians[i] = value._2;
}


}

llhp = llh ;
llh = sums.logLikelihood;
iter ++;
}



return new GaoGaussianMixtureModel(weights, gaussians);


}

private Double caleArraySum(Double[] ds)
{
Double ret = 0.0;
for (double d:ds)
{
ret = ret + d;
}
return ret;
}

public static Tuple2<Double, MultivariateGaussian> updateWeightsAndGaussians(Vector mean, Matrix sigma, Double weight, Double sumWeights)
{
Vector mu = Vectors.zeros( mean.size( ) );
BLAS.axpy( 1.0/weight, mean, mu );
BLAS.syr( -weight, mu, (DenseMatrix)sigma );

double newWeight = weight/sumWeights;

MultivariateGaussian newGaussian = new MultivariateGaussian(mu, scaleMatrix( sigma, 1.0/weight ));
return new Tuple2<Double, MultivariateGaussian>(newWeight, newGaussian);
}

private static Matrix scaleMatrix(Matrix m, Double scale)
{
double[] ds = m.toArray( );
for (int i=0; i<ds.length; i++)
{
ds[i] = ds[i]*scale;
}
return Matrices.dense( m.numRows( ), m.numCols( ), ds );
}

private static Matrix addMatrix(Matrix m, Matrix m1)
{

double[] ds = m.toArray( );
double[] ds1 = m1.toArray( );
double[] value = new double[ds.length];
for (int i=0; i<ds.length; i++)
{
value[i] = ds[i] + ds1[i];
}
return Matrices.dense( m.numRows( ), m.numCols( ), value );
}

private static Vector vectorMean0( List<Vector> list )
{
int size = list.size( );
Vector retValue = Vectors.zeros( list.get( 0 ).size( ) );
for ( Vector v : list )
{
BLAS.axpy( 1.0, v, retValue );
}
BLAS.scal( 1.0 / size, retValue );
return retValue;
}


private static Vector vectorMean( List<Vector> list )
{
int size = list.size( );
Vector retValue = Vectors.zeros( list.get( 0 ).size( ) );
for ( Vector v : list )
{
BLAS.axpy( 1.0, v, retValue );
}
double[] ds = retValue.toArray( );
for (int i=0; i< ds.length; i++)
{
ds[i] = div( ds[i], size, 1 );
}
return retValue;
}

private static double div(double d1,double d2,int scale)
{  
        BigDecimal b1=new BigDecimal(Double.toString(d1));  
        BigDecimal b2=new BigDecimal(Double.toString(d2));  
        return b1.divide(b2,scale,BigDecimal.ROUND_HALF_UP).doubleValue();  
    }


private static ExpectationSum combExpectationSum(ExpectationSum v1, ExpectationSum v2)
{
int i=0;
while (i < v1.k)
{
v1.weights[i] = v1.weights[i] + v2.weights[i];
BLAS.axpy( 1.0, v2.means[i], v1.means[i] );
v1.sigmas[i] = addMatrix( v1.sigmas[i], v2.sigmas[i] );
i++;
}

v1.logLikelihood = v1.logLikelihood + v2.logLikelihood;
return v1;
}

public static Matrix initCovariance( List<Vector> list )
{
Vector mean = vectorMean0( list );
int size = list.size( );
int vectorSize = list.get( 0 ).size( );
double[] ds = new double[vectorSize];


for ( int i = 0; i < list.size( ); i++ )
{
Vector v = list.get( i );
for ( int j = 0; j < vectorSize; j++ )
{
double value = v.apply( j ) - mean.apply( j );
ds[j] = ds[j] + value * value;
}
}


double[] retValue = new double[vectorSize * vectorSize];
for ( int i = 0; i < vectorSize; i++ )
{
for ( int j = 0; j < vectorSize; j++ )
{
int x = i * vectorSize + j;
double value = i == j ? ds[j] / size : 0.0;
retValue[x] = value;
}
}


return Matrices.dense( vectorSize, vectorSize, retValue );
}


private boolean shouldDistributeGaussians( int k, int d )
{
return ( ( k - 1.0 ) / k ) * d > 25;
}


public static class MyDefaultFunction0<R> extends AbstractFunction0<R> implements Serializable
{


private R r;


public MyDefaultFunction0( R r )
{
this.r = r;
}


@Override
public R apply( )
{
return r;
}


}

public static abstract class MyFunction0<R> extends AbstractFunction0<R> implements Serializable
{


}




public static abstract class MyFunction1<T1, R> extends AbstractFunction1<T1, R> implements Serializable
{


}


private static abstract class MyFunction2<T1, T2, R> extends AbstractFunction2<T1, T2, R> implements Serializable
{


}

public static ExpectationSum add(Double[] weights, MultivariateGaussian[] gaussians, ExpectationSum sums, Vector x)
{
Tuple2<Double[],Double> value = caleP( weights, gaussians, x );
Double[] p = value._1;
double pSum = value._2;

sums.logLikelihood = sums.logLikelihood + Math.log( pSum );

int i=0;
while (i<sums.k)
{
p[i] = p[i]/pSum;
sums.weights[i] = sums.weights[i] + p[i];
BLAS.axpy( p[i], x, sums.means[i] );
BLAS.syr( p[i], x, (DenseMatrix)sums.sigmas[i] );
i++;
}

return sums;
}

private static Tuple2<Double[],Double> caleP(Double[] weights, MultivariateGaussian[] gaussians, Vector x)
{
Double[] retValue = new Double[weights.length];
Double sum = 0.0;
for (int i=0; i<weights.length; i++)
{
retValue[i] = MLUtils$.MODULE$.EPSILON( ) + weights[i]*gaussians[i].pdf( x );
sum = sum + retValue[i];
}

return new Tuple2<Double[], Double>(retValue, sum );
}

public static ExpectationSum zero(int k, int d)
{
return new ExpectationSum(0.0, (Double[])Array.fill( k, new MyDefaultFunction0<Double>( 0.0 ), ClassManifestFactory.classType( Double.class ) ),
(Vector[])Array.fill( k, new MyFunction0<Vector>(  )
{


@Override
public Vector apply( )
{
return Vectors.zeros( d );
}

}, ClassManifestFactory.classType( Vector.class ) ),
(Matrix[])Array.fill( k, new MyFunction0<Matrix>( )
{


@Override
public Matrix apply( )
{
return  Matrices.zeros( d, d );
}

}, ClassManifestFactory.classType( Matrix.class ) ));
}


private static class ExpectationSum implements Serializable
{


double logLikelihood;
Double[] weights;
Vector[] means;
Matrix[] sigmas;
int k;

public ExpectationSum( double logLikelihood, Double[] weights, Vector[] means, Matrix[] sigmas )
{
super( );
this.logLikelihood = logLikelihood;
this.weights = weights;
this.means = means;
this.sigmas = sigmas;
k = weights.length;
}
}
}

猜你喜欢

转载自blog.csdn.net/hhtop112408/article/details/78814370
今日推荐