/* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */

/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: See CVS logs. Details at http://www.graphviz.org/
 *************************************************************************/


#include "matrix_ops.h"
#include "memory.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

static double p_iteration_threshold = 1e-3;

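/* power_iteration: compute the 'neigs' top eigenvectors of the n x n matrix
 * 'square_mat', together with their eigenvalues, by power iteration with
 * orthogonalization against the eigenvectors already found.  'eigs' must point
 * to 'neigs' caller-allocated n-vectors; the eigenvalues are written to
 * 'evals'.  If 'initialize' is nonzero, random starting vectors are used.
 * Returns 0 if the last power iteration exceeded the 30*n iteration limit,
 * nonzero otherwise.
 *
 * Illustrative usage sketch (not part of the original code; assumes 'mat' is a
 * caller-built n x n matrix and n >= 2):
 *
 *     double **vecs = N_GNEW(2, double *);
 *     double vals[2];
 *     vecs[0] = N_GNEW(n, double);
 *     vecs[1] = N_GNEW(n, double);
 *     if (!power_iteration(mat, n, 2, vecs, vals, 1)) {
 *         ... handle non-convergence ...
 *     }
 */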
int
power_iteration(double **square_mat, int n, int neigs, double **eigs,
                double *evals, int initialize)
{
    /* compute the 'neigs' top eigenvectors of 'square_mat' using power iteration */

    int i, j;
    double *tmp_vec = N_GNEW(n, double);
    double *last_vec = N_GNEW(n, double);
    double *curr_vector;
    double len;
    double angle;
    double alpha;
    int iteration = 0;
    int largest_index;
    double largest_eval;
    int Max_iterations = 30 * n;

    double tol = 1 - p_iteration_threshold;

    if (neigs >= n) {
        neigs = n;
    }

    for (i = 0; i < neigs; i++) {
        curr_vector = eigs[i];
        /* guess the i-th eigen vector */
      choose:
        if (initialize)
            for (j = 0; j < n; j++)
                curr_vector[j] = rand() % 100;
        /* orthogonalize against higher eigenvectors */
        for (j = 0; j < i; j++) {
            alpha = -dot(eigs[j], 0, n - 1, curr_vector);
            scadd(curr_vector, 0, n - 1, alpha, eigs[j]);
        }
        len = norm(curr_vector, 0, n - 1);
        if (len < 1e-10) {
            /* We have chosen a vector collinear with previous ones */
            goto choose;
        }
        vecscale(curr_vector, 0, n - 1, 1.0 / len, curr_vector);
        iteration = 0;
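        /* iterate until the direction stabilizes: the loop below stops once the
         * new (normalized) vector is nearly parallel to the previous one, i.e.
         * |<curr_vector, last_vec>| >= 1 - p_iteration_threshold
         */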
        do {
            iteration++;
            cpvec(last_vec, 0, n - 1, curr_vector);

            right_mult_with_vector_d(square_mat, n, n, curr_vector,
                                     tmp_vec);
            cpvec(curr_vector, 0, n - 1, tmp_vec);

            /* orthogonalize against higher eigenvectors */
            for (j = 0; j < i; j++) {
                alpha = -dot(eigs[j], 0, n - 1, curr_vector);
                scadd(curr_vector, 0, n - 1, alpha, eigs[j]);
            }
            len = norm(curr_vector, 0, n - 1);
            if (len < 1e-10 || iteration > Max_iterations) {
                /* We have reached the null space (e.vec. associated with e.val. 0) */
                goto exit;
            }

            vecscale(curr_vector, 0, n - 1, 1.0 / len, curr_vector);
            angle = dot(curr_vector, 0, n - 1, last_vec);
        } while (fabs(angle) < tol);
        evals[i] = angle * len;        /* this is the Rayleigh quotient (up to errors due to orthogonalization):
                                          (u*(A*u)/||A*u||)*||A*u||, where u=last_vec, and ||u||=1
                                        */
    }
  exit:
    for (; i < neigs; i++) {
        /* compute the remaining eigenvectors, which are  */
        /* probably associated with eigenvalue 0 and for  */
        /* which power iteration is unreliable            */
        curr_vector = eigs[i];
        /* guess the i-th eigen vector */
        for (j = 0; j < n; j++)
            curr_vector[j] = rand() % 100;
        /* orthogonalize against higher eigenvectors */
        for (j = 0; j < i; j++) {
            alpha = -dot(eigs[j], 0, n - 1, curr_vector);
            scadd(curr_vector, 0, n - 1, alpha, eigs[j]);
        }
        len = norm(curr_vector, 0, n - 1);
        vecscale(curr_vector, 0, n - 1, 1.0 / len, curr_vector);
        evals[i] = 0;

    }


    /* sort vectors by their evals, to overcome possible mis-convergence: */
    for (i = 0; i < neigs - 1; i++) {
        largest_index = i;
        largest_eval = evals[largest_index];
        for (j = i + 1; j < neigs; j++) {
            if (largest_eval < evals[j]) {
                largest_index = j;
                largest_eval = evals[largest_index];
            }
        }
        if (largest_index != i) {        /* exchange eigenvectors: */
            cpvec(tmp_vec, 0, n - 1, eigs[i]);
            cpvec(eigs[i], 0, n - 1, eigs[largest_index]);
            cpvec(eigs[largest_index], 0, n - 1, tmp_vec);

            evals[largest_index] = evals[i];
            evals[i] = largest_eval;
        }
    }

    free(tmp_vec);
    free(last_vec);

    return (iteration <= Max_iterations);
}



void
mult_dense_mat(double **A, float **B, int dim1, int dim2, int dim3,
               float ***CC)
{
/*
    A is dim1 x dim2, B is dim2 x dim3, C = A x B
*/
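/*
    Note: C (= *CC) is allocated or reallocated here; all rows share one
    contiguous storage block, so only C[0] and C itself need to be freed.
*/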

    double sum;
    int i, j, k;
    float *storage;
    float **C = *CC;
    if (C != NULL) {
        storage = (float *) realloc(C[0], dim1 * dim3 * sizeof(float));
        *CC = C = (float **) realloc(C, dim1 * sizeof(float *));
    } else {
        storage = (float *) malloc(dim1 * dim3 * sizeof(float));
        *CC = C = (float **) malloc(dim1 * sizeof(float *));
    }

    for (i = 0; i < dim1; i++) {
        C[i] = storage;
        storage += dim3;
    }

    for (i = 0; i < dim1; i++) {
        for (j = 0; j < dim3; j++) {
            sum = 0;
            for (k = 0; k < dim2; k++) {
                sum += A[i][k] * B[k][j];
            }
            C[i][j] = (float) (sum);
        }
    }
}

void
mult_dense_mat_d(double **A, float **B, int dim1, int dim2, int dim3,
                 double ***CC)
{
/*
    A is dim1 x dim2, B is dim2 x dim3, C = A x B
*/
    double **C = *CC;
    double *storage;
    int i, j, k;
    double sum;

    if (C != NULL) {
        storage = (double *) realloc(C[0], dim1 * dim3 * sizeof(double));
        *CC = C = (double **) realloc(C, dim1 * sizeof(double *));
    } else {
        storage = (double *) malloc(dim1 * dim3 * sizeof(double));
        *CC = C = (double **) malloc(dim1 * sizeof(double *));
    }

    for (i = 0; i < dim1; i++) {
        C[i] = storage;
        storage += dim3;
    }

    for (i = 0; i < dim1; i++) {
        for (j = 0; j < dim3; j++) {
            sum = 0;
            for (k = 0; k < dim2; k++) {
                sum += A[i][k] * B[k][j];
            }
            C[i][j] = sum;
        }
    }
}

void
mult_sparse_dense_mat_transpose(vtx_data * A, double **B, int dim1,
                                int dim2, float ***CC)
{
/*
    A is dim1 x dim1 and sparse, B is dim2 x dim1, C = A x B^T (so C is dim1 x dim2)
*/
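/*
    Notes: A is given in the vtx_data adjacency form (row i has A[i].nedges
    entries, with column indices in A[i].edges and values in A[i].ewgts).
    C (= *CC) is allocated or reallocated here as in mult_dense_mat.
*/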

    float *storage;
    int i, j, k;
    double sum;
    float *ewgts;
    int *edges;
    int nedges;
    float **C = *CC;
    if (C != NULL) {
        storage = (float *) realloc(C[0], dim1 * dim2 * sizeof(float));
        *CC = C = (float **) realloc(C, dim1 * sizeof(float *));
    } else {
        storage = (float *) malloc(dim1 * dim2 * sizeof(float));
        *CC = C = (float **) malloc(dim1 * sizeof(float *));
    }

    for (i = 0; i < dim1; i++) {
        C[i] = storage;
        storage += dim2;
    }

    for (i = 0; i < dim1; i++) {
        edges = A[i].edges;
        ewgts = A[i].ewgts;
        nedges = A[i].nedges;
        for (j = 0; j < dim2; j++) {
            sum = 0;
            for (k = 0; k < nedges; k++) {
                sum += ewgts[k] * B[j][edges[k]];
            }
            C[i][j] = (float) (sum);
        }
    }
}



/* Copy a range of a double vector to a double vector */
void cpvec(double *copy, int beg, int end, double *vec)
{
    int i;

    copy = copy + beg;
    vec = vec + beg;
    for (i = end - beg + 1; i; i--) {
        *copy++ = *vec++;
    }
}

/* Returns scalar product of two double n-vectors. */
double dot(double *vec1, int beg, int end, double *vec2)
{
    int i;
    double sum;

    sum = 0.0;
    vec1 = vec1 + beg;
    vec2 = vec2 + beg;
    for (i = end - beg + 1; i; i--) {
        sum += (*vec1++) * (*vec2++);
    }
    return (sum);
}


/* Scaled add - fills double vec1 with vec1 + alpha*vec2 over range */
void scadd(double *vec1, int beg, int end, double fac, double *vec2)
{
    int i;

    vec1 = vec1 + beg;
    vec2 = vec2 + beg;
    for (i = end - beg + 1; i; i--) {
        (*vec1++) += fac * (*vec2++);
    }
}

/* Scale - fills vec1 with alpha*vec2 over range, double version */
void vecscale(double *vec1, int beg, int end, double alpha, double *vec2)
{
    int i;

    vec1 += beg;
    vec2 += beg;
    for (i = end - beg + 1; i; i--) {
        (*vec1++) = alpha * (*vec2++);
    }
}

/* Returns 2-norm of a double n-vector over range. */
double norm(double *vec, int beg, int end)
{
    return (sqrt(dot(vec, beg, end, vec)));
}


#ifndef __cplusplus

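/* Make 'vec' orthogonal to the all-ones vector by subtracting its mean from
 * every component.
 */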
/* inline */
void orthog1(int n, double *vec /* vector to be orthogonalized against 1 */
    )
{
    int i;
    double *pntr;
    double sum;

    sum = 0.0;
    pntr = vec;
    for (i = n; i; i--) {
        sum += *pntr++;
    }
    sum /= n;
    pntr = vec;
    for (i = n; i; i--) {
        *pntr++ -= sum;
    }
}

#define RANGE 500

/* inline */
void init_vec_orth1(int n, double *vec)
{
    /* randomly generate a vector orthogonal to 1 (i.e., with mean 0) */
    int i;

    for (i = 0; i < n; i++)
        vec[i] = rand() % RANGE;

    orthog1(n, vec);
}

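/* Sparse matrix-vector product: result = matrix * vector, with 'matrix' an
 * n x n matrix in the vtx_data adjacency representation (edges/ewgts per row).
 */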
/* inline */
void
right_mult_with_vector(vtx_data * matrix, int n, double *vector,
                       double *result)
{
    int i, j;

    double res;
    for (i = 0; i < n; i++) {
        res = 0;
        for (j = 0; j < matrix[i].nedges; j++)
            res += matrix[i].ewgts[j] * vector[matrix[i].edges[j]];
        result[i] = res;
    }
    /* orthog1(n,vector); */
}

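/* Dense matrix-vector product: result = matrix * vector, with 'matrix' an
 * n x n float matrix and 'vector'/'result' double n-vectors.
 */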
/* inline */
void
right_mult_with_vector_f(float **matrix, int n, double *vector,
                         double *result)
{
    int i, j;

    double res;
    for (i = 0; i < n; i++) {
        res = 0;
        for (j = 0; j < n; j++)
            res += matrix[i][j] * vector[j];
        result[i] = res;
    }
    /* orthog1(n,vector); */
}

/* inline */
void
vectors_subtraction(int n, double *vector1, double *vector2,
                    double *result)
{
    int i;
    for (i = 0; i < n; i++) {
        result[i] = vector1[i] - vector2[i];
    }
}

/* inline */
void
vectors_addition(int n, double *vector1, double *vector2, double *result)
{
    int i;
    for (i = 0; i < n; i++) {
        result[i] = vector1[i] + vector2[i];
    }
}

#ifdef UNUSED
/* inline */
void
vectors_mult_addition(int n, double *vector1, double alpha,
                      double *vector2)
{
    int i;
    for (i = 0; i < n; i++) {
        vector1[i] = vector1[i] + alpha * vector2[i];
    }
}
#endif

/* inline */
void
vectors_scalar_mult(int n, double *vector, double alpha, double *result)
{
    int i;
    for (i = 0; i < n; i++) {
        result[i] = vector[i] * alpha;
    }
}

/* inline */
void copy_vector(int n, double *source, double *dest)
{
    int i;
    for (i = 0; i < n; i++)
        dest[i] = source[i];
}

/* inline */
double vectors_inner_product(int n, double *vector1, double *vector2)
{
    int i;
    double result = 0;
    for (i = 0; i < n; i++) {
        result += vector1[i] * vector2[i];
    }

    return result;
}

/* inline */
double max_abs(int n, double *vector)
{
    double max_val = -1e50;
    int i;
    for (i = 0; i < n; i++)
        if (fabs(vector[i]) > max_val)
            max_val = fabs(vector[i]);

    return max_val;
}

#ifdef UNUSED
/* inline */
void orthogvec(int n, double *vec1,        /* vector to be orthogonalized */
               double *vec2        /* normalized vector to be orthogonalized against */
    )
{
    double alpha;
    if (vec2 == NULL) {
        return;
    }

    alpha = -vectors_inner_product(n, vec1, vec2);

    vectors_mult_addition(n, vec1, alpha, vec2);
}

    /* sparse matrix extensions: */

/* inline */
void mat_mult_vec(vtx_data * L, int n, double *vec, double *result)
{
    /* compute result = -L*vec */
    int i, j;
    double sum;
    int *edges;
    float *ewgts;

    for (i = 0; i < n; i++) {
        sum = 0;
        edges = L[i].edges;
        ewgts = L[i].ewgts;
        for (j = 0; j < L[i].nedges; j++) {
            sum -= ewgts[j] * vec[edges[j]];
        }
        result[i] = sum;
    }
}
#endif

/* inline */
void
right_mult_with_vector_transpose(double **matrix,
                                 int dim1, int dim2,
                                 double *vector, double *result)
{
    /* matrix is dim2 x dim1, vector has dim2 components, result = matrix^T x vector */
    int i, j;

    double res;
    for (i = 0; i < dim1; i++) {
        res = 0;
        for (j = 0; j < dim2; j++)
            res += matrix[j][i] * vector[j];
        result[i] = res;
    }
}

/* inline */
void
right_mult_with_vector_d(double **matrix,
                         int dim1, int dim2,
                         double *vector, double *result)
{
    /* matrix is dim1 x dim2, vector has dim2 components, result = matrix x vector */
    int i, j;

    double res;
    for (i = 0; i < dim1; i++) {
        res = 0;
        for (j = 0; j < dim2; j++)
            res += matrix[i][j] * vector[j];
        result[i] = res;
    }
}


/*****************************
** Single precision (float) **
** version                  **
*****************************/

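/* Float version of orthog1: subtract the mean so that 'vec' becomes
 * orthogonal to the all-ones vector.
 */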
/* inline */
void orthog1f(int n, float *vec)
{
    int i;
    float *pntr;
    float sum;

    sum = 0.0;
    pntr = vec;
    for (i = n; i; i--) {
        sum += *pntr++;
    }
    sum /= n;
    pntr = vec;
    for (i = n; i; i--) {
        *pntr++ -= sum;
    }
}

#ifdef UNUSED
/* inline */
void right_mult_with_vectorf
    (vtx_data * matrix, int n, float *vector, float *result) {
    int i, j;

    float res;
    for (i = 0; i < n; i++) {
        res = 0;
        for (j = 0; j < matrix[i].nedges; j++)
            res += matrix[i].ewgts[j] * vector[matrix[i].edges[j]];
        result[i] = res;
    }
}

/* inline */
void right_mult_with_vector_fd
    (float **matrix, int n, float *vector, double *result) {
    int i, j;

    float res;
    for (i = 0; i < n; i++) {
        res = 0;
        for (j = 0; j < n; j++)
            res += matrix[i][j] * vector[j];
        result[i] = res;
    }
}
#endif

/* inline */
void right_mult_with_vector_ff
    (float *packed_matrix, int n, float *vector, float *result) {
    /* packed matrix is the upper-triangular part of a symmetric matrix arranged in a vector row-wise */
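    /* (In this layout row i starts at offset i*n - i*(i-1)/2, so entry (i,j)
     * with j >= i sits at index i*n - i*(i-1)/2 + (j - i); the code below
     * simply walks 'index' sequentially instead of computing it.)
     */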
    int i, j, index;
    float vector_i;

    float res;
    for (i = 0; i < n; i++) {
        result[i] = 0;
    }
    for (index = 0, i = 0; i < n; i++) {
        res = 0;
        vector_i = vector[i];
        /* deal with main diag */
        res += packed_matrix[index++] * vector_i;
        /* deal with off diag */
        for (j = i + 1; j < n; j++, index++) {
            res += packed_matrix[index] * vector[j];
            result[j] += packed_matrix[index] * vector_i;
        }
        result[i] += res;
    }
}

/* inline */
void
vectors_substractionf(int n, float *vector1, float *vector2, float *result)
{
    int i;
    for (i = 0; i < n; i++) {
        result[i] = vector1[i] - vector2[i];
    }
}

/* inline */
void
vectors_additionf(int n, float *vector1, float *vector2, float *result)
{
    int i;
    for (i = 0; i < n; i++) {
        result[i] = vector1[i] + vector2[i];
    }
}

/* inline */
void
vectors_mult_additionf(int n, float *vector1, float alpha, float *vector2)
{
    int i;
    for (i = 0; i < n; i++) {
        vector1[i] = vector1[i] + alpha * vector2[i];
    }
}

/* inline */
void vectors_scalar_multf(int n, float *vector, float alpha, float *result)
{
    int i;
    for (i = 0; i < n; i++) {
        result[i] = (float) vector[i] * alpha;
    }
}

/* inline */
void copy_vectorf(int n, float *source, float *dest)
{
    int i;
    for (i = 0; i < n; i++)
        dest[i] = source[i];
}

/* inline */
double vectors_inner_productf(int n, float *vector1, float *vector2)
{
    int i;
    double result = 0;
    for (i = 0; i < n; i++) {
        result += vector1[i] * vector2[i];
    }

    return result;
}

/* inline */
void set_vector_val(int n, double val, double *result)
{
    int i;
    for (i = 0; i < n; i++)
        result[i] = val;
}

/* inline */
void set_vector_valf(int n, float val, float *result)
{
    int i;
    for (i = 0; i < n; i++)
        result[i] = val;
}

/* inline */
double max_absf(int n, float *vector)
{
    int i;
    float max_val = -1e30f;
    for (i = 0; i < n; i++)
        if (fabs(vector[i]) > max_val)
            max_val = (float) (fabs(vector[i]));

    return max_val;
}

/* inline */
void square_vec(int n, float *vec)
{
    int i;
    for (i = 0; i < n; i++) {
        vec[i] *= vec[i];
    }
}

/* inline */
void invert_vec(int n, float *vec)
{
    int i;
    float v;
    for (i = 0; i < n; i++) {
        if ((v = vec[i]) != 0.0)
            vec[i] = 1.0f / v;
    }
}

/* inline */
void sqrt_vec(int n, float *vec)
{
    int i;
    double d;
    for (i = 0; i < n; i++) {
        /* do this in two steps to avoid a bug in gcc-4.00 on AIX */
        d = sqrt(vec[i]);
        vec[i] = (float) d;
    }
}

/* inline */
void sqrt_vecf(int n, float *source, float *target)
{
    int i;
    double d;
    float v;
    for (i = 0; i < n; i++) {
        if ((v = source[i]) >= 0.0) {
            /* do this in two steps to avoid a bug in gcc-4.00 on AIX */
            d = sqrt(v);
            target[i] = (float) d;
        }
    }
}

/* inline */
void invert_sqrt_vec(int n, float *vec)
{
    int i;
    double d;
    float v;
    for (i = 0; i < n; i++) {
        if ((v = vec[i]) > 0.0) {
            /* do this in two steps to avoid a bug in gcc-4.00 on AIX */
            d = 1. / sqrt(v);
            vec[i] = (float) d;
        }
    }
}

#ifdef UNUSED
/* inline */
void init_vec_orth1f(int n, float *vec)
{
    /* randomly generate a vector orthogonal to 1 (i.e., with mean 0) */
    int i;

    for (i = 0; i < n; i++)
        vec[i] = (float) (rand() % RANGE);

    orthog1f(n, vec);
}


    /* sparse matrix extensions: */

/* inline */
void mat_mult_vecf(vtx_data * L, int n, float *vec, float *result)
{
    /* compute result = -L*vec */
    int i, j;
    float sum;
    int *edges;
    float *ewgts;

    for (i = 0; i < n; i++) {
        sum = 0;
        edges = L[i].edges;
        ewgts = L[i].ewgts;
        for (j = 0; j < L[i].nedges; j++) {
            sum -= ewgts[j] * vec[edges[j]];
        }
        result[i] = sum;
    }
}
#endif

#endif
