Java Code Examples for weka.core.matrix.Matrix#get()
The following examples show how to use weka.core.matrix.Matrix#get(). Each example lists the project, source file, and license it was taken from.
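Matrix#get(int i, int j) returns the element in row i, column j of a weka.core.matrix.Matrix (both indices are zero-based). As a quick orientation before the project examples, here is a minimal standalone sketch; the class name and values are made up for illustration and are not taken from any of the projects below.

import weka.core.matrix.Matrix;

public class MatrixGetDemo {  // hypothetical class name, for illustration only
  public static void main(String[] args) {
    // build a 2 x 3 matrix from a row-major double[][]
    double[][] values = { { 1.0, 2.0, 3.0 },
                          { 4.0, 5.0, 6.0 } };
    Matrix m = new Matrix(values);

    // get(row, column) reads a single element
    System.out.println(m.get(0, 2));   // 3.0
    System.out.println(m.get(1, 0));   // 4.0

    // iterate over all elements, the pattern most examples below rely on
    double sum = 0;
    for (int i = 0; i < m.getRowDimension(); i++) {
      for (int j = 0; j < m.getColumnDimension(); j++) {
        sum += m.get(i, j);
      }
    }
    System.out.println(sum);           // 21.0
  }
}

The nested loop over getRowDimension() and getColumnDimension() is the access pattern that recurs throughout the examples that follow.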
Example 1
Source File: GaussianProcesses.java From tsml with GNU General Public License v3.0
/**
 * Computes standard deviation for given instance, without
 * transforming target back into original space.
 */
protected double computeStdDev(Instance inst, Matrix k) throws Exception {

  double kappa = m_kernel.eval(-1, -1, inst) + m_delta * m_delta;

  double s = 0;
  int n = m_L.length;
  for (int i = 0; i < n; i++) {
    double t = 0;
    for (int j = 0; j < n; j++) {
      t -= k.get(j, 0) * (i > j ? m_L[i][j] : m_L[j][i]);
    }
    s += t * k.get(i, 0);
  }

  double sigma = m_delta;
  if (kappa > s) {
    sigma = Math.sqrt(kappa - s);
  }

  return sigma;
}
Example 2
Source File: sIB.java From tsml with GNU General Public License v3.0
/**
 * Compute the sIB score
 * @param m a term-cluster matrix, where m[i, j] is the probability of term i given cluster j
 * @param Pt an array of cluster prior probabilities
 * @return the sIB score which indicates the quality of the partition
 */
private double sIB_local_MI(Matrix m, double[] Pt) {
  double Hy = 0.0, Ht = 0.0;
  for (int i = 0; i < Pt.length; i++) {
    Ht += Pt[i] * Math.log(Pt[i]);
  }
  Ht = -Ht;

  for (int i = 0; i < m_numAttributes; i++) {
    double Py = 0.0;
    for (int j = 0; j < m_numCluster; j++) {
      Py += m.get(i, j) * Pt[j];
    }
    if (Py == 0) continue;
    Hy += Py * Math.log(Py);
  }
  Hy = -Hy;

  double Hyt = 0.0, tmp = 0.0;
  for (int i = 0; i < m.getRowDimension(); i++) {
    for (int j = 0; j < m.getColumnDimension(); j++) {
      if ((tmp = m.get(i, j)) == 0 || Pt[j] == 0) {
        continue;
      }
      tmp *= Pt[j];
      Hyt += tmp * Math.log(tmp);
    }
  }
  return Hy + Ht + Hyt;
}
Example 3
Source File: sIB.java From tsml with GNU General Public License v3.0
/**
 * Compute the MI between instances and attributes
 * @param m the term-document matrix
 * @param input object that describes the statistics about the training data
 */
private void MI(Matrix m, Input input) {
  int minDimSize = m.getColumnDimension() < m.getRowDimension()
      ? m.getColumnDimension() : m.getRowDimension();
  if (minDimSize < 2) {
    System.err.println("Warning : This is not a JOINT distribution");
    input.Hx = Entropy(m);
    input.Hy = 0;
    input.Ixy = 0;
    return;
  }

  input.Hx = Entropy(input.Px);
  input.Hy = Entropy(input.Py);

  double entropy = input.Hx + input.Hy;
  for (int i = 0; i < m_numInstances; i++) {
    Instance inst = m_data.instance(i);
    for (int v = 0; v < inst.numValues(); v++) {
      double tmp = m.get(inst.index(v), i);
      if (tmp <= 0) continue;
      entropy += tmp * Math.log(tmp);
    }
  }
  input.Ixy = entropy;
  if (m_verbose) {
    System.out.println("Ixy = " + input.Ixy);
  }
}
Example 4
Source File: sIB.java From tsml with GNU General Public License v3.0
/**
 * Compute the entropy score based on a matrix
 * @param p a matrix with non-negative and normalized probabilities
 * @return the entropy value
 */
private double Entropy(Matrix p) {
  double mi = 0;
  for (int i = 0; i < p.getRowDimension(); i++) {
    for (int j = 0; j < p.getColumnDimension(); j++) {
      if (p.get(i, j) == 0) {
        continue;
      }
      // accumulate the p * log(p) entropy term
      mi += p.get(i, j) * Math.log(p.get(i, j));
    }
  }
  mi = -mi;
  return mi;
}
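As a sanity check on the p * log(p) accumulation above, the same loop applied to a uniform 2 x 2 probability matrix yields ln 4. The class name and values below are made up for illustration and are not part of sIB.java.

import weka.core.matrix.Matrix;

public class EntropyCheck {  // hypothetical class name, for illustration only
  public static void main(String[] args) {
    Matrix p = new Matrix(new double[][] { { 0.25, 0.25 }, { 0.25, 0.25 } });
    double h = 0;
    for (int i = 0; i < p.getRowDimension(); i++) {
      for (int j = 0; j < p.getColumnDimension(); j++) {
        if (p.get(i, j) == 0) {
          continue;
        }
        h += p.get(i, j) * Math.log(p.get(i, j));
      }
    }
    h = -h;
    System.out.println(h); // ln 4 ~ 1.3863, the entropy of a uniform 2 x 2 distribution
  }
}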
Example 5
Source File: PLSFilter.java From tsml with GNU General Public License v3.0
/**
 * Normalizes the given vector (in place).
 *
 * @param v the vector to normalize
 */
protected void normalizeVector(Matrix v) {
  double sum;
  int i;

  // determine length
  sum = 0;
  for (i = 0; i < v.getRowDimension(); i++)
    sum += v.get(i, 0) * v.get(i, 0);
  sum = StrictMath.sqrt(sum);

  // normalize content
  for (i = 0; i < v.getRowDimension(); i++)
    v.set(i, 0, v.get(i, 0) / sum);
}
Example 6
Source File: LinearModel.java From tsml with GNU General Public License v3.0
public double[] formTestPredictions(Instances testData) {
  // Form X matrix from testData
  int rows = testData.numInstances();
  int cols = testData.numAttributes(); // includes the constant term
  predicted = new double[rows];
  if (cols != m) {
    System.out.println("Error: Mismatch in attribute lengths in form test Train =" + m + " Test =" + cols);
    System.exit(0);
  }
  double[][] xt = new double[cols][rows];
  for (int i = 0; i < rows; i++)
    xt[0][i] = 1;
  for (int i = 1; i < cols; i++)
    xt[i] = testData.attributeToDoubleArray(i - 1);
  Matrix testX = new Matrix(xt);
  testX = testX.transpose();

  for (int i = 0; i < rows; i++) {
    // Find predicted
    predicted[i] = paras[0];
    for (int j = 1; j < paras.length; j++)
      predicted[i] += paras[j] * testX.get(i, j);
  }
  return predicted;
}
Example 7
Source File: LatentSemanticAnalysis.java From tsml with GNU General Public License v3.0
/**
 * Transform an instance in original (unnormalized) format
 * @param instance an instance in the original (unnormalized) format
 * @return a transformed instance
 * @throws Exception if instance can't be transformed
 */
public Instance convertInstance(Instance instance) throws Exception {
  if (m_s == null) {
    throw new Exception("convertInstance: Latent Semantic Analysis not "
        + "performed yet.");
  }

  // array to hold new attribute values
  double[] newValues = new double[m_outputNumAttributes];

  // apply filters so new instance is in same format as training instances
  Instance tempInstance = (Instance) instance.copy();
  if (!instance.dataset().equalHeaders(m_trainHeader)) {
    throw new Exception("Can't convert instance: headers don't match: "
        + "LatentSemanticAnalysis");
  }
  // replace missing values
  m_replaceMissingFilter.input(tempInstance);
  m_replaceMissingFilter.batchFinished();
  tempInstance = m_replaceMissingFilter.output();
  // normalize
  if (m_normalize) {
    m_normalizeFilter.input(tempInstance);
    m_normalizeFilter.batchFinished();
    tempInstance = m_normalizeFilter.output();
  }
  // convert nominal attributes to binary
  m_nominalToBinaryFilter.input(tempInstance);
  m_nominalToBinaryFilter.batchFinished();
  tempInstance = m_nominalToBinaryFilter.output();
  // remove class/other attributes
  if (m_attributeFilter != null) {
    m_attributeFilter.input(tempInstance);
    m_attributeFilter.batchFinished();
    tempInstance = m_attributeFilter.output();
  }

  // record new attribute values
  if (m_hasClass) {
    // copy class value
    newValues[m_outputNumAttributes - 1] = instance.classValue();
  }
  double[][] oldInstanceValues = new double[1][m_numAttributes];
  oldInstanceValues[0] = tempInstance.toDoubleArray();
  Matrix instanceVector = new Matrix(oldInstanceValues); // old attribute values
  instanceVector = instanceVector.times(m_transformationMatrix); // new attribute values
  for (int i = 0; i < m_actualRank; i++) {
    newValues[i] = instanceVector.get(0, i);
  }

  // return newly transformed instance
  if (instance instanceof SparseInstance) {
    return new SparseInstance(instance.weight(), newValues);
  } else {
    return new DenseInstance(instance.weight(), newValues);
  }
}
Example 8
Source File: LLGC.java From collective-classification-weka-package with GNU General Public License v3.0
/**
 * initializes the matrices
 *
 * @throws Exception if something goes wrong
 */
protected void initialize() throws Exception {
  int numInst;
  int numCls;
  int i;
  int n;
  double d;
  double sum;
  double factor;

  numInst = m_Data.size();
  numCls = m_TrainsetNew.numClasses();

  // the classification matrix Y
  clock();
  m_MatrixY = new Matrix(numInst, numCls);
  for (i = 0; i < numInst; i++) {
    if (!m_Data.get(i).classIsMissing())
      m_MatrixY.set(i, (int) m_Data.get(i).classValue(), 1.0);
  }
  clock("Matrix Y");

  // the affinity matrix W
  // calc distances and variance of distances (i.e., sample variance)
  clock();
  if (getIncludeNumAttributes())
    factor = m_TrainsetNew.numAttributes();
  else
    factor = 1;
  m_DistanceFunction.setInstances(m_TrainsetNew);
  m_MatrixW = new Matrix(numInst, numInst);
  for (i = 0; i < numInst; i++) {
    for (n = 0; n < numInst; n++) {
      if (i == n) {
        d = 0;
      } else {
        d = m_DistanceFunction.distance(m_Data.get(i), m_Data.get(n));
        d = StrictMath.exp(
              -StrictMath.pow(d, 2) / (2 * getSigma() * getSigma() * factor));
      }
      m_MatrixW.set(i, n, d);
    }
  }
  clock("Matrix W");

  // the diagonal matrix D
  clock();
  m_MatrixD = new Matrix(numInst, numInst);
  for (i = 0; i < numInst; i++) {
    sum = 0;
    for (n = 0; n < numInst; n++)
      sum += m_MatrixW.get(i, n);
    m_MatrixD.set(i, i, sum);
  }
  clock("Matrix D");

  // calc S or P (both results are stored in S for simplicity)
  clock();
  switch (m_SequenceLimit) {
    case SEQ_LIMIT_GRAPHKERNEL:
      // D^-1/2
      m_MatrixD = m_MatrixD.sqrt().inverse();
      // S = D^-1/2 * W * D^-1/2
      m_MatrixS = m_MatrixD.times(m_MatrixW).times(m_MatrixD);
      break;
    case SEQ_LIMIT_STOCHASTICMATRIX:
      // P = D^-1 * W
      m_MatrixS = m_MatrixD.inverse().times(m_MatrixW);
      break;
    case SEQ_LIMIT_STOCHASTICMATRIX_T:
      // P^T = (D^-1 * W)^T
      m_MatrixS = m_MatrixD.inverse().times(m_MatrixW).transpose();
      break;
    default:
      throw new Exception("Unknown sequence limit function: " + m_SequenceLimit + "!");
  }
  clock("Matrix S/P");
}