/* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */

/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: See CVS logs. Details at http://www.graphviz.org/
 *************************************************************************/

#include "digcola.h"
#ifdef DIGCOLA
#include "kkutils.h"
#include "matrix_ops.h"
#include "conjgrad.h"

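/* Shift the vector to zero mean and rescale it to unit Euclidean length. */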
static void
standardize(double* orthog, int nvtxs)
{
    double len, avg = 0;
    int i;
    for (i=0; i<nvtxs; i++)
        avg+=orthog[i];
    avg/=nvtxs;

    /* centralize: */
    for (i=0; i<nvtxs; i++)
        orthog[i]-=avg;

    /* normalize: */
    len = norm(orthog, 0, nvtxs-1);
    vecscale(orthog, 0, nvtxs-1, 1.0 / len, orthog);
}

static void
mat_mult_vec_orthog(float** mat, int dim1, int dim2, double* vec,
                    double* result, double* orthog)
{
    /* computes mat*vec, where mat is a dim1*dim2 matrix; if orthog is
     * non-NULL, the result is then projected onto the subspace orthogonal
     * to orthog
     */
    int i,j;
    double sum;

    for (i=0; i<dim1; i++) {
        sum=0;
        for (j=0; j<dim2; j++) {
            sum += mat[i][j]*vec[j];
        }
        result[i]=sum;
    }
    if (orthog!=NULL) {
        double alpha=-dot(result,0,dim1-1,orthog);
        scadd(result, 0, dim1-1, alpha, orthog);
    }
}

static void
power_iteration_orthog(float** square_mat, int n, int neigs,
    double** eigs, double* evals, double* orthog, double p_iteration_threshold)
{
    /*
     * Power-Iteration with (I-orthog*orthog^T)*square_mat*(I-orthog*orthog^T)
     */
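    /*
     * For each requested eigenpair, start from a random vector, project out
     * 'orthog' and the eigenvectors found so far, then repeatedly multiply by
     * the (projected) matrix, re-orthogonalize and normalize, until two
     * successive normalized iterates agree to within p_iteration_threshold.
     */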

    int i,j;
    double *tmp_vec = N_GNEW(n, double);
    double *last_vec = N_GNEW(n, double);
    double *curr_vector;
    double len;
    double angle;
    double alpha;
    int iteration;
    int largest_index;
    double largest_eval;

    double tol=1-p_iteration_threshold;

    if (neigs>=n) {
        neigs=n;
    }

    for (i=0; i<neigs; i++) {
        curr_vector = eigs[i];
        /* guess the i-th eigenvector */
    choose:
        for (j=0; j<n; j++) {
            curr_vector[j] = rand()%100;
        }

        if (orthog!=NULL) {
            alpha=-dot(orthog,0,n-1,curr_vector);
            scadd(curr_vector, 0, n-1, alpha, orthog);
        }
        /* orthogonalize against higher eigenvectors */
        for (j=0; j<i; j++) {
            alpha = -dot(eigs[j], 0, n-1, curr_vector);
            scadd(curr_vector, 0, n-1, alpha, eigs[j]);
        }
        len = norm(curr_vector, 0, n-1);
        if (len<1e-10) {
            /* We have chosen a vector collinear with previous ones */
            goto choose;
        }
        vecscale(curr_vector, 0, n-1, 1.0 / len, curr_vector);
        iteration=0;
        do {
            iteration++;
            cpvec(last_vec,0,n-1,curr_vector);

            mat_mult_vec_orthog(square_mat,n,n,curr_vector,tmp_vec,orthog);
            cpvec(curr_vector,0,n-1,tmp_vec);

            /* orthogonalize against higher eigenvectors */
            for (j=0; j<i; j++) {
                alpha = -dot(eigs[j], 0, n-1, curr_vector);
                scadd(curr_vector, 0, n-1, alpha, eigs[j]);
            }
            len = norm(curr_vector, 0, n-1);
            if (len<1e-10) {
                /* We have reached the null space (eigenvectors associated
                 * with eigenvalue 0)
                 */
                goto exit;
            }

            vecscale(curr_vector, 0, n-1, 1.0 / len, curr_vector);
            angle = dot(curr_vector, 0, n-1, last_vec);
        } while (fabs(angle)<tol);
        /* the Rayleigh quotient (up to errors due to orthogonalization):
         * (u*(A*u)/||A*u||)*||A*u|| = u*(A*u), where u=last_vec and ||u||=1
         */
        evals[i]=angle*len;
    }
exit:
    for (; i<neigs; i++) {
        /* compute the remaining eigenvectors, which are
         * probably associated with eigenvalue 0 and for
         * which power-iteration is dangerous
         */
        curr_vector = eigs[i];
        /* guess the i-th eigenvector */
        for (j=0; j<n; j++)
            curr_vector[j] = rand()%100;
        /* orthogonalize against higher eigenvectors */
        for (j=0; j<i; j++) {
            alpha = -dot(eigs[j], 0, n-1, curr_vector);
            scadd(curr_vector, 0, n-1, alpha, eigs[j]);
        }
        len = norm(curr_vector, 0, n-1);
        vecscale(curr_vector, 0, n-1, 1.0 / len, curr_vector);
        evals[i]=0;
    }

    /* sort the vectors by their eigenvalues, to overcome possible mis-convergence: */
    for (i=0; i<neigs-1; i++) {
        largest_index=i;
        largest_eval=evals[largest_index];
        for (j=i+1; j<neigs; j++) {
            if (largest_eval<evals[j]) {
                largest_index=j;
                largest_eval=evals[largest_index];
            }
        }
        if (largest_index!=i) { /* exchange eigenvectors: */
            cpvec(tmp_vec,0,n-1,eigs[i]);
            cpvec(eigs[i],0,n-1,eigs[largest_index]);
            cpvec(eigs[largest_index],0,n-1,tmp_vec);

            evals[largest_index]=evals[i];
            evals[i]=largest_eval;
        }
    }

    free (tmp_vec); free (last_vec);
}

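/* compute_avgs: row_avg[i] receives the mean of the squared distances in row i
 * of Dij; *all_avg receives the mean of the squared distances over the whole
 * matrix.
 */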
static float*
compute_avgs(DistType** Dij, int n, float* all_avg)
{
    float* row_avg = N_GNEW(n, float);
    int i,j;
    double sum=0, sum_row;

    for (i=0; i<n; i++) {
        sum_row=0;
        for (j=0; j<n; j++) {
            sum+=(double)Dij[i][j]*(double)Dij[i][j];
            sum_row+=(double)Dij[i][j]*(double)Dij[i][j];
        }
        row_avg[i]=(float)(sum_row/n);
    }
    *all_avg=(float)(sum/n/n);
    return row_avg;
}

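/* compute_Bij: double-center the matrix of squared distances, as in classical
 * MDS: Bij[i][j] = -d_ij^2 + row_avg[i] + row_avg[j] - all_avg
 * (the conventional factor of 1/2 is omitted, which only rescales the layout).
 */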
static float**
compute_Bij(DistType** Dij, int n)
{
    int i,j;
    float* storage = N_GNEW(n*n,float);
    float** Bij = N_GNEW(n, float*);
    float* row_avg;
    float all_avg;

    for (i=0; i<n; i++)
        Bij[i] = storage+i*n;

    row_avg = compute_avgs(Dij, n, &all_avg);
    for (i=0; i<n; i++) {
        for (j=0; j<=i; j++) {
            Bij[i][j]=-(float)Dij[i][j]*Dij[i][j]+row_avg[i]+row_avg[j]-all_avg;
            Bij[j][i]=Bij[i][j];
        }
    }
    free (row_avg);
    return Bij;
}

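/* CMDS_orthog: compute 'dim' axes of a classical-MDS-style layout of Dij by
 * power iteration, restricted (when orthog is given) to the subspace
 * orthogonal to the standardized orthog vector; each eigenvector is scaled by
 * sqrt(|eigenvalue|) to give coordinates.
 */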
static void
CMDS_orthog(vtx_data* graph, int n, int dim, double** eigs, double tol,
            double* orthog, DistType** Dij)
{
    int i,j;
    float** Bij = compute_Bij(Dij, n);
    double* evals= N_GNEW(dim, double);

    double * orthog_aux = NULL;
    if (orthog!=NULL) {
        orthog_aux = N_GNEW(n, double);
        for (i=0; i<n; i++) {
            orthog_aux[i]=orthog[i];
        }
        standardize(orthog_aux,n);
    }
    power_iteration_orthog(Bij, n, dim, eigs, evals, orthog_aux, tol);

    for (i=0; i<dim; i++) {
        for (j=0; j<n; j++) {
            eigs[i][j]*=sqrt(fabs(evals[i]));
        }
    }
    free (Bij[0]); free (Bij);
    free (evals); free (orthog_aux);
}

#define SCALE_FACTOR 256

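/* IMDS_given_dim: given fixed coordinates for one axis (given_coords), compute
 * a second axis (new_coords) that reduces the stress with weights
 * w_ij = 1/d_ij^2. The distances are scaled up, the given axis is rescaled to
 * its stress-optimal length, the new axis is seeded by constrained classical
 * MDS and then refined by repeatedly solving the majorization system
 * lap*y = balance with conjugate gradient. Returns non-zero if the conjugate
 * gradient solver fails.
 */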
int IMDS_given_dim(vtx_data* graph, int n, double* given_coords,
                   double* new_coords, double conj_tol)
{
    int iterations2;
    int i,j, rv = 0;
    DistType** Dij;
    float* f_storage = NULL;
    double* x = given_coords;
    double uniLength;
    double* orthog_aux = NULL;
    double* y = new_coords;
    float** lap = N_GNEW(n, float*);
    float degree;
    double pos_i;
    double* balance = N_GNEW(n, double);
    double b;
    boolean converged;

#if 0
    iterations1=mat_mult_count1=0; /* We don't compute the x-axis at all. */
#endif

    Dij = compute_apsp(graph, n);

    /* Scale up the distances so that the square roots of the residual
     * distances computed below keep enough precision when DistType is an
     * integer type.
     */
    for (i=0; i<n; i++)
        for (j=0; j<n; j++)
            Dij[i][j]*=SCALE_FACTOR;

    assert(x!=NULL);
    {
        double sum1, sum2;
        /* scale x (the given axis) by the stress-optimal factor
         * sum_{i<j}(|x_i-x_j|/d_ij) / sum_{i<j}((x_i-x_j)/d_ij)^2
         */
        orthog_aux = N_GNEW(n, double);
        for (i=0; i<n; i++) {
            orthog_aux[i]=x[i];
        }
        standardize(orthog_aux,n);

        for (sum1=sum2=0,i=1; i<n; i++) {
            for (j=0; j<i; j++) {
                sum1+=1.0/(Dij[i][j])*fabs(x[i]-x[j]);
                sum2+=1.0/(Dij[i][j]*Dij[i][j])*fabs(x[i]-x[j])*fabs(x[i]-x[j]);
            }
        }
        uniLength=sum1/sum2;
        for (i=0; i<n; i++)
            x[i]*=uniLength;
    }

    /* smart initialization: seed y with a classical-MDS axis orthogonal to x */
    CMDS_orthog(graph, n, 1, &y, conj_tol, x, Dij);

    /* Compute Laplacian: */
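    /* lap[i][j] = -w_ij = -1/d_ij^2 for j != i, and lap[i][i] holds the sum of
     * the w_ij of row i, so every row of lap sums to zero.
     */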
    f_storage = N_GNEW(n*n, float);

    for (i=0; i<n; i++) {
        lap[i]=f_storage+i*n;
        degree=0;
        for (j=0; j<n; j++) {
            if (j==i)
                continue;
            degree-=lap[i][j]=-1.0f/((float)Dij[i][j]*(float)Dij[i][j]); /* w_{ij} */
        }
        lap[i][i]=degree;
    }

    /* compute residual distances */
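    /* The target distance along the new axis is what remains of d_ij after the
     * fixed x-axis accounts for its share: sqrt(max(0, d_ij^2 - (x_i-x_j)^2)).
     */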
    /* if (x!=NULL) */
    {
        double diff;
        for (i=1; i<n; i++) {
            pos_i=x[i];
            for (j=0; j<i; j++) {
                diff=(double)Dij[i][j]*(double)Dij[i][j]-(pos_i-x[j])*(pos_i-x[j]);
                Dij[i][j]=Dij[j][i]=diff>0 ? (DistType)sqrt(diff) : 0;
            }
        }
    }

    /* Compute the balance vector: */
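    /* balance[i] = sum_j w_ij * delta_ij * sign(y_i - y_j), the right-hand side
     * of the majorization step, where delta_ij is the residual target distance.
     */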
    for (i=0; i<n; i++) {
        pos_i=y[i];
        balance[i]=0;
        for (j=0; j<n; j++) {
            if (j==i)
                continue;
            if (pos_i>=y[j]) {
                balance[i]+=Dij[i][j]*(-lap[i][j]); /* w_{ij}*delta_{ij} */
            }
            else {
                balance[i]-=Dij[i][j]*(-lap[i][j]); /* w_{ij}*delta_{ij} */
            }
        }
    }

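    /* Majorization: alternately solve lap*y = balance by conjugate gradient and
     * recompute balance from the new ordering of y, until no entry of balance
     * changes by more than a relative 1e-5 (or 200 iterations are reached).
     */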
    for (converged=FALSE,iterations2=0; iterations2<200 && !converged; iterations2++) {
        if (conjugate_gradient_f(lap, y, balance, n, conj_tol, n, TRUE) < 0) {
            rv = 1;
            goto cleanup;
        }
        converged=TRUE;
        for (i=0; i<n; i++) {
            pos_i=y[i];
            b=0;
            for (j=0; j<n; j++) {
                if (j==i)
                    continue;
                if (pos_i>=y[j]) {
                    b+=Dij[i][j]*(-lap[i][j]);
                }
                else {
                    b-=Dij[i][j]*(-lap[i][j]);
                }
            }
            if ((b != balance[i]) && (fabs(1-b/balance[i])>1e-5)) {
                converged=FALSE;
                balance[i]=b;
            }
        }
    }

    for (i=0; i<n; i++) {
        x[i] /= uniLength;
        y[i] /= uniLength;
    }

cleanup:

    free (Dij[0]); free (Dij);
    free (lap[0]); free (lap);
    free (orthog_aux); free (balance);
    return rv;
}

#endif /* DIGCOLA */