/* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */

/*************************************************************************
 * Copyright (c) 2011 AT&T Intellectual Property
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors: See CVS logs. Details at http://www.graphviz.org/
 *************************************************************************/

#include "dot.h"

/*
 * Author: Mohammad T. Irfan
 *   Summer, 2008
 */

/* TODO:
 *   - Support clusters
 *   - Support disconnected graphs
 *   - Provide algorithms for aspect ratios < 1
 */

#define MIN_AR 1.0
#define MAX_AR 20.0
#define DEF_PASSES 5
#define DPI 72

/*
 * NODE GROUPS FOR SAME RANKING
 * Node group data structure groups nodes together for
 * MIN, MAX, SOURCE, SINK constraints.
 * The grouping is based on the union-find data structure and
 * provides sequential access to the nodes in the same group.
 */
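
/* Example (illustrative): given the constraint {rank=same; a; b; c;},
 * UF_find returns the same leader for a, b, and c, so the three nodes are
 * placed in one nodeGroup_t whose width is the sum of the member widths
 * and whose height is the maximum member height (see computeNodeGroups).
 */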

/* data structure for node groups */
typedef struct nodeGroup_t {
    node_t **nodes;
    int nNodes;
    double width, height;
} nodeGroup_t;

static nodeGroup_t *nodeGroups;
static int nNodeGroups = 0;

/* computeNodeGroups:
 * Compute the groupings of nodes.
 * The grouping is based on the union-find data structure.
 */
static void computeNodeGroups(graph_t * g)
{
    node_t *n;

    nodeGroups = N_GNEW(agnnodes(g), nodeGroup_t);

    nNodeGroups = 0;

    /* initialize node ids. The id of a node is used as an index into
     * nodeGroups. */
    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        ND_id(n) = -1;
    }

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        if (ND_UF_size(n) == 0) {       /* no same ranking constraint */
            nodeGroups[nNodeGroups].nodes = NEW(node_t *);
            nodeGroups[nNodeGroups].nodes[0] = n;
            nodeGroups[nNodeGroups].nNodes = 1;
            nodeGroups[nNodeGroups].width = ND_width(n);
            nodeGroups[nNodeGroups].height = ND_height(n);

            ND_id(n) = nNodeGroups;
            nNodeGroups++;
        } else {                /* group same-ranked nodes */
            node_t *l = UF_find(n);
            if (ND_id(l) > -1) {        /* leader is already grouped */
                int index = ND_id(l);
                nodeGroups[index].nodes[nodeGroups[index].nNodes++] = n;
                nodeGroups[index].width += ND_width(n);
                if (nodeGroups[index].height < ND_height(n))
                    nodeGroups[index].height = ND_height(n);

                ND_id(n) = index;
            } else {            /* create a new group */
                nodeGroups[nNodeGroups].nodes = N_NEW(ND_UF_size(l), node_t *);

                if (l == n) {   /* node n is the leader */
                    nodeGroups[nNodeGroups].nodes[0] = l;
                    nodeGroups[nNodeGroups].nNodes = 1;
                    nodeGroups[nNodeGroups].width = ND_width(l);
                    nodeGroups[nNodeGroups].height = ND_height(l);
                } else {
                    nodeGroups[nNodeGroups].nodes[0] = l;
                    nodeGroups[nNodeGroups].nodes[1] = n;
                    nodeGroups[nNodeGroups].nNodes = 2;
                    nodeGroups[nNodeGroups].width = ND_width(l) + ND_width(n);
                    nodeGroups[nNodeGroups].height =
                        (ND_height(l) < ND_height(n)) ? ND_height(n) : ND_height(l);
                }

                ND_id(l) = nNodeGroups;
                ND_id(n) = nNodeGroups;
                nNodeGroups++;
            }
        }
    }
}

/*
 * END OF CODE FOR NODE GROUPS
 */

/* countDummyNodes:
 * Count the number of dummy nodes
 */
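/* Example: an edge from a node on rank 1 to a node on rank 4 passes
 * through ranks 2 and 3, contributing abs(4 - 1) - 1 = 2 dummy nodes.
 */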
int countDummyNodes(graph_t * g)
{
    int count = 0;
    node_t *n;
    edge_t *e;

    /* Count dummy nodes */
    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
#ifdef UNUSED
            /* this loop can be avoided */
            for (k = ND_rank(agtail(e)) + 1; k < ND_rank(aghead(e)); k++) {
                count++;
            }
#endif
            /* flat edges do not have dummy nodes */
            if (ND_rank(aghead(e)) != ND_rank(agtail(e)))
                count += abs(ND_rank(aghead(e)) - ND_rank(agtail(e))) - 1;
        }
    }
    return count;
}

/*
 * FFDH PACKING ALGORITHM TO ACHIEVE TARGET ASPECT RATIO
 */
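
/* Background: FFDH (First-Fit Decreasing Height) is a classical
 * strip-packing heuristic: items are sorted by nonincreasing height and
 * each item is placed on the first level with enough remaining width,
 * opening a new level when none fits. Here, roughly, layers play the
 * role of levels and node groups the role of items.
 */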

/*
 * layerWidthInfo_t: data structure for keeping layer width information
 * Each layer consists of a number of node groups.
 */
typedef struct layerWidthInfo_t {
    int layerNumber;
    nodeGroup_t **nodeGroupsInLayer;
    int *removed;		/* is the node group removed? */
    int nNodeGroupsInLayer;
    int nDummyNodes;
    double width;
    double height;
} layerWidthInfo_t;
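
/* Units note (inferred from the code below): nodeGroup_t widths and
 * heights are in inches, as read from ND_width/ND_height, whereas
 * layerWidthInfo_t.width and .height are in points; computeLayerWidths
 * multiplies by DPI (72) when accumulating.
 */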

static layerWidthInfo_t *layerWidthInfo = NULL;
static int *sortedLayerIndex;
static int nLayers = 0;

/* computeLayerWidths:
 * Compute, for each layer, the node groups it contains, its total width,
 * its maximum height, and its number of dummy nodes.
 */
static void computeLayerWidths(graph_t * g)
{
    int i;
    node_t *v;
    node_t *n;
    edge_t *e;

    nLayers = 0;

    /* free previously allocated memory */
    if (layerWidthInfo) {
        for (i = 0; i < nNodeGroups; i++) {
            if (layerWidthInfo[i].nodeGroupsInLayer)
                free(layerWidthInfo[i].nodeGroupsInLayer);
            if (layerWidthInfo[i].removed)
                free(layerWidthInfo[i].removed);
        }

        free(layerWidthInfo);
    }
    /* allocate memory
     * the number of layers can go up to the number of node groups
     */
    layerWidthInfo = N_NEW(nNodeGroups, layerWidthInfo_t);

    for (i = 0; i < nNodeGroups; i++) {
        layerWidthInfo[i].nodeGroupsInLayer = N_NEW(nNodeGroups, nodeGroup_t *);

        layerWidthInfo[i].removed = N_NEW(nNodeGroups, int);

        layerWidthInfo[i].layerNumber = i;
        layerWidthInfo[i].nNodeGroupsInLayer = 0;
        layerWidthInfo[i].nDummyNodes = 0;
        layerWidthInfo[i].width = 0.0;
        layerWidthInfo[i].height = 0.0;
    }

    /* Count dummy nodes in the layer */
    for (n = agfstnode(g); n; n = agnxtnode(g, n))
        for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
            int k;

            /* FIX: This loop may be unnecessary, but replacing it with the
             * commented-out code below causes a segmentation fault; the
             * reason was never tracked down.
             */
            for (k = ND_rank(agtail(e)) + 1; k < ND_rank(aghead(e)); k++) {
                layerWidthInfo[k].nDummyNodes++;
            }

#ifdef UNUSED
            if (ND_rank(aghead(e)) != ND_rank(agtail(e)))
                /* flat edges do not have dummy nodes */
                layerWidthInfo[k].nDummyNodes =
                    abs(ND_rank(aghead(e)) - ND_rank(agtail(e))) - 1;
#endif
        }

#ifdef UNUSED
    /*****************************************************************
     * This code is removed. It considers dummy nodes in layer width,
     * which does not give good results in experiments.
     *****************************************************************/

    for (i = 0; i < nNodeGroups; i++) {
        v = nodeGroups[i].nodes[0];
        layerWidthInfo[ND_rank(v)].width =
            (layerWidthInfo[ND_rank(v)].nDummyNodes - 1) * GD_nodesep(g);
    }
#endif

    /* gather the layer information */
    for (i = 0; i < nNodeGroups; i++) {
        int r;

        v = nodeGroups[i].nodes[0];
        r = ND_rank(v);
        if (r + 1 > nLayers)	/* update the number of layers */
            nLayers = r + 1;

        layerWidthInfo[r].width += nodeGroups[i].width * DPI
            + (layerWidthInfo[r].width > 0) * GD_nodesep(g);
        if (layerWidthInfo[r].height < nodeGroups[i].height * DPI)
            layerWidthInfo[r].height = nodeGroups[i].height * DPI;
        layerWidthInfo[r].nodeGroupsInLayer[layerWidthInfo[r].nNodeGroupsInLayer] =
            &nodeGroups[i];
        layerWidthInfo[r].nNodeGroupsInLayer++;
    }
}

/* compFunction:
 * Comparison function to be used in qsort.
 * For sorting the layers by nonincreasing width
 */
static int compFunction(const void *a, const void *b)
{
    int *ind1 = (int *) a;
    int *ind2 = (int *) b;

    return (layerWidthInfo[*ind2].width > layerWidthInfo[*ind1].width) -
        (layerWidthInfo[*ind2].width < layerWidthInfo[*ind1].width);
}

/* sortLayers:
 * Sort the layers by width (nonincreasing order)
 * qsort should be replaced by insertion sort for better performance.
 * (layers are "almost" sorted during iterations)
 */
static void sortLayers(graph_t * g)
{
    /* Sort only the first nLayers entries: layerWidthInfo has only
     * nNodeGroups entries, so sorting all agnnodes(g) indices would let
     * compFunction read past the end of layerWidthInfo.
     */
    qsort(sortedLayerIndex, nLayers, sizeof(int), compFunction);
}

#ifdef UNUSED
/* getMaxDummyNodes:
 * get the max # of dummy nodes on the incoming edges to a nodeGroup
 */
static int getMaxDummyNodes(nodeGroup_t * ng)
{
    int i, max = 0, cnt = 0;
    for (i = 0; i < ng->nNodes; i++) {
        node_t *n = ng->nodes[i];
        edge_t *e;
        graph_t *g = agraphof(n);
        for (e = agfstin(g, n); e; e = agnxtin(g, e)) {
            cnt += ND_rank(aghead(e)) - ND_rank(agtail(e));	/* 1 more than the original count */
            if (ND_rank(aghead(e)) - ND_rank(agtail(e)) > max)
                max = ND_rank(aghead(e)) - ND_rank(agtail(e));
        }
    }

    return max;
}
#endif

/* getOutDegree:
 * Return the sum of out degrees of the nodes in a node group.
 */
static int getOutDegree(nodeGroup_t * ng)
{
    int i, cnt = 0;
    for (i = 0; i < ng->nNodes; i++) {
        node_t *n = ng->nodes[i];
        edge_t *e;
        graph_t *g = agraphof(n);

        /* count outdegree. This loop might be unnecessary. */
        for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
            cnt++;
        }
    }

    return cnt;
}

/* compFunction2:
 * Comparison function to be used in qsort.
 * For sorting the node groups by their out degrees (nondecreasing)
 */
static int compFunction2(const void *a, const void *b)
{
    nodeGroup_t **ind1 = (nodeGroup_t **) a, **ind2 = (nodeGroup_t **) b;

    int cnt1 = getOutDegree(*ind1);
    int cnt2 = getOutDegree(*ind2);

    return (cnt2 < cnt1) - (cnt2 > cnt1);
}

#ifdef UNUSED
/* compFunction3:
 * Comparison function to be used in qsort.
 * For sorting the node groups by their height & width
 */
static int compFunction3(const void *a, const void *b)
{
    nodeGroup_t **ind1 = (nodeGroup_t **) a, **ind2 = (nodeGroup_t **) b;
    if ((*ind2)->height == (*ind1)->height)
        return ((*ind2)->width < (*ind1)->width) - ((*ind2)->width > (*ind1)->width);

    return ((*ind2)->height < (*ind1)->height) - ((*ind2)->height > (*ind1)->height);
}

/*****************************************************************
 * The following functions are no longer used.
 * They were originally used in the cocktail tool.
 *****************************************************************/
/* checkLayerConstraints:
 * check if there is any node group in the layer
 * that is not constrained by MIN/MAX/SOURCE/SINK-RANK constraints.
 */
static int checkLayerConstraints(layerWidthInfo_t lwi)
{
    int i;
    for (i = 0; i < lwi.nNodeGroupsInLayer; i++) {
        if (lwi.nodeGroupsInLayer[i]->nNodes > 0) {
            int rtype = ND_ranktype(lwi.nodeGroupsInLayer[i]->nodes[0]);
            if (rtype != MINRANK && rtype != MAXRANK && rtype != SOURCERANK
                && rtype != SINKRANK)
                return 1;
        }
    }

    return 0;
}

/* checkLayerConstraints2:
 * check if all the node groups in the layer are
 * constrained by MIN/MAX/SOURCE/SINK-RANK constraints
 */
static int checkLayerConstraints2(layerWidthInfo_t lwi)
{
    int i;
    for (i = 0; i < lwi.nNodeGroupsInLayer; i++) {
        if (lwi.nodeGroupsInLayer[i]->nNodes > 0) {
            int rtype = ND_ranktype(lwi.nodeGroupsInLayer[i]->nodes[0]);
            if (rtype != MINRANK && rtype != MAXRANK && rtype != SOURCERANK
                && rtype != SINKRANK)
                return 0;
        }
    }

    return 1;
}

/* checkNodeGroupConstraints:
 * check if the node group is not constrained by
 * MIN/MAX/SOURCE/SINK-RANK constraints
 * Only used in the cocktail tool.
 */
static int checkNodeGroupConstraints(nodeGroup_t * ndg)
{
    int rtype = ND_ranktype(ndg->nodes[0]);

    if (rtype != MINRANK && rtype != MAXRANK && rtype != SOURCERANK
        && rtype != SINKRANK)
        return 1;

    return 0;
}

/* checkHorizontalEdge:
 * check if there is an edge from ng to a node in
 * layerWidthInfo[nextLayerIndex].
 * Only used in the cocktail tool.
 */
static int
checkHorizontalEdge(graph_t * g, nodeGroup_t * ng, int nextLayerIndex)
{
    int i;
    edge_t *e;

    for (i = 0; i < ng->nNodes; i++) {
        for (e = agfstout(g, ng->nodes[i]); e; e = agnxtout(g, e)) {
            if (layerWidthInfo[nextLayerIndex].layerNumber == ND_rank(aghead(e))) {
                return 1;
            }
        }
    }

    return 0;
}

/* hasMaxOrSinkNodes:
 * check if the layer lwi has MAX or SINK nodes
 * Only used in the cocktail tool.
 */
static int hasMaxOrSinkNodes(layerWidthInfo_t * lwi)
{
    int i, j;

    for (i = 0; i < lwi->nNodeGroupsInLayer; i++) {
        if (lwi->removed[i])
            continue;
        for (j = 0; j < lwi->nodeGroupsInLayer[i]->nNodes; j++) {
            if (ND_ranktype(lwi->nodeGroupsInLayer[i]->nodes[j]) == MAXRANK
                || ND_ranktype(lwi->nodeGroupsInLayer[i]->nodes[j]) == SINKRANK)
                return 1;
        }
    }

    return 0;
}

/* reduceMaxWidth:
 * FFDH procedure, originally used for the FFDH packing heuristic.
 * No longer used.
 */
static void reduceMaxWidth(graph_t * g)
{
    int i;
    int maxLayerIndex;		// = sortedLayerIndex[0];
    double nextMaxWidth;	// = (nLayers > 1) ? layerWidthInfo[sortedLayerIndex[1]].width : 0;
    double w = 0;
    Agnode_t *v;

    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[sortedLayerIndex[i]].nNodeGroupsInLayer <= 1)	// || !checkLayerConstraints(layerWidthInfo[sortedLayerIndex[i]]))
            continue;
        else {
            maxLayerIndex = sortedLayerIndex[i];
            nextMaxWidth = (nLayers > i + 1)
                ? layerWidthInfo[sortedLayerIndex[i + 1]].width : 0;
            break;
        }
    }

    if (i == nLayers)
        return;			//reduction of layer width is not possible.

    //sort the node groups in the maxLayerIndex layer by outdegree,
    //nondecreasing (compFunction2); the original height-then-width
    //ordering corresponds to compFunction3
    qsort(layerWidthInfo[maxLayerIndex].nodeGroupsInLayer,
          layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer,
          sizeof(nodeGroup_t *), compFunction2);
    //printf("ht0 = %lf, ht1 = %lf\n", ND_height(layerWidthInfo[maxLayerIndex].nodes[0]), ND_height(layerWidthInfo[maxLayerIndex].nodes[1]));

    if (nextMaxWidth <= layerWidthInfo[maxLayerIndex].width / 2
        || nextMaxWidth == layerWidthInfo[maxLayerIndex].width)
        nextMaxWidth = layerWidthInfo[maxLayerIndex].width / 2;

    double targetWidth = nextMaxWidth;	//layerWidthInfo[maxLayerIndex].width/2;

    //printf("max = %lf, target = %lf\n", layerWidthInfo[maxLayerIndex].width, targetWidth);
    //getchar();

    //packing by node demotion
    int nextLayerIndex = -1;
    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[i].layerNumber ==
            layerWidthInfo[maxLayerIndex].layerNumber + 1)
            nextLayerIndex = i;
    }

    if (nextLayerIndex > -1) {
        //if (layerWidthInfo[nextLayerIndex].width <= 0.5*layerWidthInfo[maxLayerIndex].width)
        //{
        int changed = 0;
        //demote nodes to the next layer
        for (i = 0; i < layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer; i++) {
            if (layerWidthInfo[maxLayerIndex].removed[i])
                continue;

            if (!checkHorizontalEdge
                (g, layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i], nextLayerIndex)
                && w + layerWidthInfo[nextLayerIndex].width +
                layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->width <= targetWidth) {
                w += layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->width;
                changed++;

                int j;
                nodeGroup_t *ng = layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i];

                layerWidthInfo[maxLayerIndex].removed[i] = 1;
                layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer--;
                layerWidthInfo[maxLayerIndex].width -= ng->width;
                for (j = 0; j < ng->nNodes; j++)
                    ND_rank(ng->nodes[j])++;

                layerWidthInfo[nextLayerIndex].
                    nodeGroupsInLayer[layerWidthInfo[nextLayerIndex].nNodeGroupsInLayer] = ng;
                layerWidthInfo[nextLayerIndex].
                    removed[layerWidthInfo[nextLayerIndex].nNodeGroupsInLayer] = 0;
                layerWidthInfo[nextLayerIndex].nNodeGroupsInLayer++;
                layerWidthInfo[nextLayerIndex].width += ng->width;

                //int jj;
                //for (jj = 0; jj < layerWidthInfo[nextLayerIndex].nNodeGroupsInLayer; jj++) {
                //Agnode_t *node = layerWidthInfo[nextLayerIndex].nodeGroupsInLayer[jj]->nodes[0];
                //printf("%s\n", agnameof(node));
                //}
            }
        }

        if (changed) {
            //printf("Demoted %d nodes\n", changed);
            return;
        }
        //}
    }
    //packing by creating new layers. Must be commented out if packing by demotion is used

    //going to create a new layer. increase the rank of all higher ranked nodes. (to be modified...)
    for (v = agfstnode(g); v; v = agnxtnode(g, v)) {
        if (ND_rank(v) > layerWidthInfo[maxLayerIndex].layerNumber)	/* +1 */
            ND_rank(v)++;
    }

    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[i].layerNumber >
            layerWidthInfo[maxLayerIndex].layerNumber)
            layerWidthInfo[i].layerNumber++;
    }

    //now partition the current layer into two layers (to be modified to support general case of > 2 layers)
    int flag = 0;		//is a new layer created?
    int alt = 0;
    for (i = 0; i < layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer; i++) {
        if (layerWidthInfo[maxLayerIndex].removed[i])
            continue;

        //nodesep-> only if there are > 1 nodes
        if ((w + layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->width * DPI
             + (w > 0) * GD_nodesep(g) <= targetWidth && alt == 0
             && ND_ranktype(layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->nodes[0]) != SINKRANK
             && ND_ranktype(layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->nodes[0]) != MAXRANK)
            || (ND_ranktype(layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->nodes[0]) != SINKRANK
                && ND_ranktype(layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->nodes[0]) != MAXRANK
                && hasMaxOrSinkNodes(&layerWidthInfo[maxLayerIndex])))
            //&& ND_pinned(layerWidthInfo[maxLayerIndex].nodes[i]) == 0
        {
            w += layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->width * DPI
                + (w > 0) * GD_nodesep(g);
            alt = 1;
        } else {
            int j;
            nodeGroup_t *ng = layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i];

            flag = 1;

            layerWidthInfo[maxLayerIndex].removed[i] = 1;
            layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer--;
            layerWidthInfo[maxLayerIndex].nDummyNodes++;	/* SHOULD BE INCREASED BY THE SUM OF INDEG OF ALL NODES IN GROUP */
            layerWidthInfo[maxLayerIndex].width -= (ng->width * DPI + GD_nodesep(g));
            for (j = 0; j < ng->nNodes; j++)
                ND_rank(ng->nodes[j])++;

            //create new layer
            layerWidthInfo[nLayers].
                nodeGroupsInLayer[layerWidthInfo[nLayers].nNodeGroupsInLayer] = ng;
            layerWidthInfo[nLayers].nNodeGroupsInLayer++;
            layerWidthInfo[nLayers].layerNumber = ND_rank(ng->nodes[0]);

            layerWidthInfo[nLayers].width += ng->width * DPI
                + (layerWidthInfo[nLayers].nNodeGroupsInLayer > 1) * GD_nodesep(g);	// just add the node widths now.

            alt = 0;
        }
    }

    if (flag) {
        //calculate number of dummy nodes
        node_t *n;
        edge_t *e;
        int nDummy = 0;

        for (n = agfstnode(g); n; n = agnxtnode(g, n))
            for (e = agfstout(g, n); e; e = agnxtout(g, e)) {
                if ((ND_rank(aghead(e)) > layerWidthInfo[nLayers].layerNumber
                     && ND_rank(agtail(e)) < layerWidthInfo[nLayers].layerNumber)
                    || (ND_rank(aghead(e)) < layerWidthInfo[nLayers].layerNumber
                        && ND_rank(agtail(e)) > layerWidthInfo[nLayers].layerNumber))
                    nDummy++;
            }

        layerWidthInfo[nLayers].nDummyNodes = nDummy;
        layerWidthInfo[nLayers].width +=
            (layerWidthInfo[nLayers].nDummyNodes - 1) * GD_nodesep(g);
        nLayers++;
    } else {
        //undo increment of ranks and layerNumbers
        for (v = agfstnode(g); v; v = agnxtnode(g, v)) {
            if (ND_rank(v) > layerWidthInfo[maxLayerIndex].layerNumber + 1)
                ND_rank(v)--;
        }

        for (i = 0; i < nLayers; i++) {
            if (layerWidthInfo[i].layerNumber >
                layerWidthInfo[maxLayerIndex].layerNumber + 1)
                layerWidthInfo[i].layerNumber--;
        }
    }
}
#endif

/* reduceMaxWidth2:
 * This is the main heuristic for partitioning the widest layer.
 * Partitioning is based on the outdegrees of the nodes.
 * Replace compFunction2 by compFunction3 to partition by node
 * widths and heights instead.
 * FFDH procedure
 */
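
/* Worked example (hypothetical numbers): if the widest layer is 12in wide
 * and the next widest is 7in, targetWidth stays 7in, since nextMaxWidth is
 * reset to half the maximum only when it falls outside (1/4, 3/4) of the
 * maximum width. Node groups are then kept in the layer while they fit
 * within targetWidth; every group that does not fit receives virtual edges
 * from the first kept group, which forces the subsequent ranking to place
 * it in a lower layer.
 */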
static void reduceMaxWidth2(graph_t * g)
{
    int i;
    int maxLayerIndex = 0;
    double nextMaxWidth = 0.0;
    double w = 0;
    double targetWidth;
    int fst;
    nodeGroup_t *fstNdGrp;
    int ndem;
    int p, q;
    int limit;
    int rem;

    /* Find the widest layer. It must have at least 2 node groups. */
    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[sortedLayerIndex[i]].nNodeGroupsInLayer <= 1)
            continue;
        else {
            maxLayerIndex = sortedLayerIndex[i];
            /* get the width of the next widest layer */
            nextMaxWidth = (nLayers > i + 1)
                ? layerWidthInfo[sortedLayerIndex[i + 1]].width : 0;
            break;
        }
    }

    if (i == nLayers)
        return;			/* reduction of layer width is not possible */

    /* sort the node groups in the maxLayerIndex layer by outdegree,
     * nondecreasing (compFunction2)
     */
    qsort(layerWidthInfo[maxLayerIndex].nodeGroupsInLayer,
          layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer,
          sizeof(nodeGroup_t *), compFunction2);

#if 0
    printf("\nSorted nodes in mx layer:\n---------------------------\n");
    for (i = 0; i < layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer; i++) {
        Agnode_t *node = layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->nodes[0];
        printf("%s. width=%lf, height=%lf\n",
               agnameof(node), node->width, node->height);
    }
#endif

    if (nextMaxWidth <= layerWidthInfo[maxLayerIndex].width / 4
        || nextMaxWidth >= layerWidthInfo[maxLayerIndex].width * 3 / 4)
        nextMaxWidth = layerWidthInfo[maxLayerIndex].width / 2;

    targetWidth = nextMaxWidth;	/* layerWidthInfo[maxLayerIndex].width/2; */

    /* now partition the current layer into two or more
     * layers (determined by the ranking algorithm)
     */
    fst = 0;
    ndem = 0;
    limit = layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer;
    rem = 0;

    /* initialize w, the width of the widest layer after partitioning */
    w = 0;

    for (i = 0; i < limit + rem; i++) {
        if (layerWidthInfo[maxLayerIndex].removed[i]) {
            rem++;
            continue;
        }

        if ((w + layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->width * DPI
             + (w > 0) * GD_nodesep(g) <= targetWidth) || !fst) {
            w += layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->width * DPI
                + (w > 0) * GD_nodesep(g);
            if (!fst) {
                fstNdGrp = layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i];
                fst = 1;
            }
        } else {
            nodeGroup_t *ng = layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i];

#ifdef UNUSED
            /* The following code corrects w by adding dummy node spacing.
             * It's no longer used
             */
            int l;
            for (l = 0; l < ng->nNodes; l++) {
                w += (ND_in(ng->nodes[l]).size - 1) * GD_nodesep(g);
            }
#endif

            for (p = 0; p < fstNdGrp->nNodes; p++)
                for (q = 0; q < ng->nNodes; q++) {
                    //printf("Trying to add virtual edge: %s -> %s\n",
                    //  agnameof(fstNdGrp->nodes[p]), agnameof(ng->nodes[q]));

                    /* The following code is for deletion of long virtual edges.
                     * It's no longer used.
                     */
#ifdef UNUSED
                    for (s = ND_in(ng->nodes[q]).size - 1; s >= 0; s--) {
                        ev = ND_in(ng->nodes[q]).list[s];

                        edge_t *et;
                        int fail = 0;
                        cnt = 0;

                        for (et = agfstin(g, aghead(ev)); et; et = agnxtin(g, et)) {
                            if (aghead(et) == aghead(ev)
                                && agtail(et) == agtail(ev)) {
                                fail = 1;
                                break;
                            }
                        }

                        if (fail) {
                            //printf("FAIL DETECTED\n");
                            continue;
                        }

                        if (ED_edge_type(ev) == VIRTUAL
                            && ND_rank(aghead(ev)) > ND_rank(agtail(ev)) + 1) {
                            //printf("%d. inNode= %s.deleted: %s->%s\n",
                            //  test++, agnameof(ng->nodes[q]),
                            //  agnameof(agtail(ev)), agnameof(aghead(ev)));

                            delete_fast_edge(ev);
                            free(ev);
                        }
                    }
#endif

                    /* add a new virtual edge */
                    edge_t *newVEdge =
                        virtual_edge(fstNdGrp->nodes[p], ng->nodes[q], NULL);
                    ED_edge_type(newVEdge) = VIRTUAL;
                    ndem++;	/* increase number of node demotions */
                }

            /* the following code updates the layer width information. The
             * update is not useful in the current version of the heuristic.
             */
            layerWidthInfo[maxLayerIndex].removed[i] = 1;
            layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer--;
            /* SHOULD BE INCREASED BY THE SUM OF INDEG OF ALL NODES IN GROUP */
            layerWidthInfo[maxLayerIndex].nDummyNodes++;
            layerWidthInfo[maxLayerIndex].width -= (ng->width * DPI + GD_nodesep(g));
        }
    }
}

#ifdef UNUSED
/* balanceLayers:
 * The following is the layer balancing heuristic.
 * Balance the widths of the layers as much as possible.
 * It's no longer used.
 */
static void balanceLayers(graph_t * g)
{
    int maxLayerIndex, nextLayerIndex, i;
    double maxWidth, w;

    //get the max width layer number

    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[sortedLayerIndex[i]].nNodeGroupsInLayer <= 1
            || layerWidthInfo[sortedLayerIndex[i]].layerNumber + 1 == nLayers)
            continue;
        else {
            maxLayerIndex = sortedLayerIndex[i];
            maxWidth = layerWidthInfo[maxLayerIndex].width;
            printf("Balancing: maxLayerIndex = %d\n", maxLayerIndex);
            break;
        }
    }

    if (i == nLayers)
        return;			//reduction of layerwidth is not possible.

    //balancing ~~ packing by node demotion
    nextLayerIndex = -1;
    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[i].layerNumber ==
            layerWidthInfo[maxLayerIndex].layerNumber + 1) {
            nextLayerIndex = i;
        }
    }

    if (nextLayerIndex > -1) {
        //if (layerWidthInfo[nextLayerIndex].width <= 0.5*layerWidthInfo[maxLayerIndex].width)
        //{
        int changed = 0;
        w = 0;

        //demote nodes to the next layer
        for (i = 0; i < layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer; i++) {
            if (layerWidthInfo[maxLayerIndex].removed[i])
                continue;

            if (!checkHorizontalEdge
                (g, layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i], nextLayerIndex)
                && layerWidthInfo[nextLayerIndex].width
                /*+ (layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i])->width */
                <= layerWidthInfo[maxLayerIndex].width
                /*- (layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i])->width*/
                ) {
                w += layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i]->width;
                changed++;

                int j;
                nodeGroup_t *ng = layerWidthInfo[maxLayerIndex].nodeGroupsInLayer[i];

                layerWidthInfo[maxLayerIndex].removed[i] = 1;
                layerWidthInfo[maxLayerIndex].nNodeGroupsInLayer--;
                layerWidthInfo[maxLayerIndex].width -= ng->width;
                layerWidthInfo[maxLayerIndex].nDummyNodes++;
                for (j = 0; j < ng->nNodes; j++)
                    ND_rank(ng->nodes[j])++;

                layerWidthInfo[nextLayerIndex].
                    nodeGroupsInLayer[layerWidthInfo[nextLayerIndex].nNodeGroupsInLayer] = ng;
                layerWidthInfo[nextLayerIndex].
                    removed[layerWidthInfo[nextLayerIndex].nNodeGroupsInLayer] = 0;
                layerWidthInfo[nextLayerIndex].nNodeGroupsInLayer++;
                layerWidthInfo[nextLayerIndex].width += ng->width + GD_nodesep(g);
            }
        }

        if (changed) {
            //printf("Demoted %d nodes\n", changed);
            return;
        }
        //}
    }
}

/* applyPacking:
 * The following is the initial packing heuristic.
 * It's no longer used.
 */
static void applyPacking(graph_t * g, double targetAR)
{
    int i;

    sortedLayerIndex = N_NEW(agnnodes(g), int);

    for (i = 0; i < agnnodes(g); i++) {
        sortedLayerIndex[i] = i;
    }

    //GD_nodesep(g) = 0.25;
    //GD_ranksep(g) = 0.25;
    //printf("Nodesep = %d, Ranksep = %d\n", GD_nodesep(g), GD_ranksep(g));

    for (i = 0; i < 1; i++) {
        computeLayerWidths(g);
        sortLayers(g);
        reduceMaxWidth(g);
    }

    int k;

    for (k = 0; k < nLayers - 1; k++) {
        int cnt = 0, tg;
        if (layerWidthInfo[k].nNodeGroupsInLayer > 7) {

            cnt = 0;
            tg = layerWidthInfo[k].nNodeGroupsInLayer - 7;

            for (i = layerWidthInfo[k].nNodeGroupsInLayer - 1; i >= 0; i--) {

                if (layerWidthInfo[k].removed[i])
                    continue;

                int j;
                nodeGroup_t *ng = layerWidthInfo[k].nodeGroupsInLayer[i];

                layerWidthInfo[k].removed[i] = 1;
                layerWidthInfo[k].nNodeGroupsInLayer--;
                layerWidthInfo[k].nDummyNodes++;
                layerWidthInfo[k].width -= (ng->width * DPI + GD_nodesep(g));
                for (j = 0; j < ng->nNodes; j++)
                    ND_rank(ng->nodes[j])++;

                //create new layer
                layerWidthInfo[k + 1].
                    nodeGroupsInLayer[layerWidthInfo[k + 1].nNodeGroupsInLayer] = ng;
                layerWidthInfo[k + 1].nNodeGroupsInLayer++;
                //layerWidthInfo[k+1].layerNumber = ND_rank(ng->nodes[0]);
                //layerWidthInfo[k+1].width += ng->width*DPI + (layerWidthInfo[nLayers].nNodeGroupsInLayer > 1) * GD_nodesep(g);

                cnt++;

                if (cnt == tg)
                    break;
            }
        }
    }

    //calculate the max width
    int maxW = 0;
    int nNodeG = 0, l, nDummy = 0;
    int index;

    for (k = 0; k < nLayers; k++) {
        if (layerWidthInfo[k].width > maxW) {
            maxW = layerWidthInfo[k].width;
            nNodeG = layerWidthInfo[k].nNodeGroupsInLayer;
            l = layerWidthInfo[k].layerNumber;
            nDummy = layerWidthInfo[k].nDummyNodes;
            index = k;
        }
    }
    //printf("Ht=%d, MxW=%d, #ndGr=%d, #dumNd=%d, lyr#=%d\n",
    //  (nLayers - 1) * DPI, maxW, nNodeG, nDummy, l);
}
#endif

/* applyPacking2:
 * The following is the packing heuristic for wide layout.
 */
static void applyPacking2(graph_t * g)
{
    int i;

    sortedLayerIndex = N_NEW(agnnodes(g), int);

    for (i = 0; i < agnnodes(g); i++) {
        sortedLayerIndex[i] = i;
    }

    computeLayerWidths(g);
    sortLayers(g);
    reduceMaxWidth2(g);

    /* sortedLayerIndex is reallocated on every call; free it here so that
     * the iteration in rank3 does not leak it.
     */
    free(sortedLayerIndex);
    sortedLayerIndex = NULL;
}

#ifdef UNUSED
/* applyPacking4:
 * The following is the packing heuristic for wide layout.
 * It is used with the Nikolov-Healy heuristic.
 */
void applyPacking4(graph_t * g)
{
    int i;

    sortedLayerIndex = N_NEW(agnnodes(g), int);

    for (i = 0; i < agnnodes(g); i++) {
        sortedLayerIndex[i] = i;
    }

    for (i = 0; i < 1; i++) {
        computeLayerWidths(g);
        sortLayers(g);
        reduceMaxWidth2(g);
    }
}

/*
 * NIKOLOV & HEALY'S NODE PROMOTION HEURISTIC
 */

/****************************************************************
 * This data structure is needed for backing up node information
 * during node promotion
 ****************************************************************/
typedef struct myNodeInfo_t {
    int indegree;
    int outdegree;
    int rank;
    Agnode_t *node;
} myNodeInfo_t;

myNodeInfo_t *myNodeInfo;


/* getMaxLevelNumber:
 * return the maximum level number assigned
 */
int getMaxLevelNumber(graph_t * g)
{
    int max;
    Agnode_t *n;

    max = ND_rank(agfstnode(g));

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        if (ND_rank(n) > max)
            max = ND_rank(n);
    }

    return max;
}

/* countDummyDiff:
 * return the difference in the count of dummy nodes before
 * and after promoting the node v
 */
static int countDummyDiff(graph_t * g, Agnode_t * v, int max)
{
    int dummydiff = 0;
    Agedge_t *e;
    Agnode_t *u;
    int j;

    for (j = 0; j < ND_in(v).size; j++) {
        e = ND_in(v).list[j];
        u = agtail(e);

        if (myNodeInfo[ND_id(u)].rank == myNodeInfo[ND_id(v)].rank + 1) {
            dummydiff += countDummyDiff(g, u, max);
        }
    }

    if (myNodeInfo[ND_id(v)].rank + 1 < max
        || (ND_in(v).size == 0 && myNodeInfo[ND_id(v)].rank + 1 <= max))
        myNodeInfo[ND_id(v)].rank += 1;

    dummydiff = dummydiff - ND_in(v).size + ND_out(v).size;

    return dummydiff;
}

/* applyPromotionHeuristic:
 * Node Promotion Heuristic
 * by Nikolov and Healy
 */
static void applyPromotionHeuristic(graph_t * g)
{
    graph_t graphBkup = *g;
    Agnode_t *v;
    int promotions;

    int max = getMaxLevelNumber(g);
    int count = 0;
    int nNodes = agnnodes(g);
    int i, j;

    myNodeInfo = N_NEW(nNodes, myNodeInfo_t);
    myNodeInfo_t *myNodeInfoBak = N_NEW(nNodes, myNodeInfo_t);

    for (v = agfstnode(g), i = 0; v; v = agnxtnode(g, v), i++) {
        myNodeInfo[i].indegree = ND_in(v).size;
        myNodeInfo[i].outdegree = ND_out(v).size;
        myNodeInfo[i].rank = ND_rank(v);
        myNodeInfo[i].node = v;
        ND_id(v) = i;

        myNodeInfoBak[i] = myNodeInfo[i];
    }

    do {
        promotions = 0;

        for (v = agfstnode(g); v; v = agnxtnode(g, v)) {
            if (ND_in(v).size > 0) {
                if (countDummyDiff(g, v, max) <= 0) {
                    promotions++;

                    for (j = 0; j < nNodes; j++) {
                        myNodeInfoBak[j] = myNodeInfo[j];
                    }
                } else {
                    for (j = 0; j < nNodes; j++) {
                        myNodeInfo[j] = myNodeInfoBak[j];
                    }
                }
            }
        }
        count++;
    } while (count < max);

    for (v = agfstnode(g); v; v = agnxtnode(g, v)) {
        ND_rank(v) = myNodeInfo[ND_id(v)].rank;
    }
}

/*
 * LONGEST PATH ALGORITHM
 */

/* allNeighborsAreBelow:
 * Return 1 if all the neighbors of n are already ranked (below), else 0
 */
static int allNeighborsAreBelow(Agnode_t * n)
{
    Agedge_t *e;
    int i;

    for (i = 0; i < ND_out(n).size; i++) {
        e = ND_out(n).list[i];
        if (ED_edge_type(e) == VIRTUAL) {
            if (ED_to_orig(e) != NULL)
                e = ED_to_orig(e);
            else if (ND_node_type(aghead(e)) == VIRTUAL)
                continue;
        }

        if (ND_pinned(aghead(e)) != 2) {	/* neighbor of n is not below */
            return 0;
        }
    }

    return 1;
}

/* reverseLevelNumbers:
 * In the Nikolov-Healy ranking, the bottom layer is numbered 0 and
 * the top layer gets the maximum number.
 * Graphviz does the opposite.
 * This function converts from the Nikolov numbering to the Graphviz one.
 */
static void reverseLevelNumbers(graph_t * g)
{
    Agnode_t *n;
    int max;

    max = getMaxLevelNumber(g);

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        ND_rank(n) = max - ND_rank(n);
    }
}

/* doSameRank:
 * Maintain the same ranking constraint.
 * Can only be used with the Nikolov and Healy algorithm
 */
static void doSameRank(graph_t * g)
{
    int i;
    for (i = 0; i < nNodeGroups; i++) {
        int j;

        for (j = 0; j < nodeGroups[i].nNodes; j++) {
            /* once we find a SAMERANK node in a group, make all the
             * members of the group SAMERANK
             */
            if (ND_ranktype(nodeGroups[i].nodes[j]) == SAMERANK) {
                int k;
                int r = ND_rank(UF_find(nodeGroups[i].nodes[j]));
                for (k = 0; k < nodeGroups[i].nNodes; k++) {
                    ND_rank(nodeGroups[i].nodes[(j + k) % nodeGroups[i].nNodes]) = r;
                }

                break;
            }
        }
    }
}

/* doMinRank:
 * Maintain the MIN ranking constraint.
 * Can only be used with the Nikolov and Healy algorithm
 */
void doMinRank(graph_t * g)
{
    int i;
    for (i = 0; i < nNodeGroups; i++) {
        int j;

        for (j = 0; j < nodeGroups[i].nNodes; j++) {
            /* once we find a MINRANK node in a group, make the rank of
             * all the members of the group 0
             */
            if (ND_ranktype(nodeGroups[i].nodes[j]) == MINRANK) {
                int k;
                for (k = 0; k < nodeGroups[i].nNodes; k++) {
                    node_t *nd = nodeGroups[i].nodes[(j + k) % nodeGroups[i].nNodes];
                    ND_rank(nd) = 0;
                    if (ND_ranktype(nd) != SOURCERANK)
                        ND_ranktype(nd) = MINRANK;
                }

                break;
            }
        }
    }
}

/* getMaxRank:
 * Return the maximum rank among all nodes.
 */
static int getMaxRank(graph_t * g)
{
    node_t *v;
    int maxR = ND_rank(agfstnode(g));
    for (v = agfstnode(g); v; v = agnxtnode(g, v)) {
        if (ND_rank(v) > maxR)
            maxR = ND_rank(v);
    }

    return maxR;
}

/* doMaxRank:
 * Maintain the MAX ranking constraint.
 * Can only be used with the Nikolov and Healy algorithm
 */
static void doMaxRank(graph_t * g)
{
    int i;
    for (i = 0; i < nNodeGroups; i++) {
        int j;
        int maxR = getMaxRank(g);

        for (j = 0; j < nodeGroups[i].nNodes; j++) {
            /* once we find a MAXRANK node in a group, make the rank of
             * all the members of the group the maximum
             */
            if (ND_ranktype(nodeGroups[i].nodes[j]) == MAXRANK) {
                int k;
                for (k = 0; k < nodeGroups[i].nNodes; k++) {
                    node_t *nd = nodeGroups[i].nodes[(j + k) % nodeGroups[i].nNodes];
                    ND_rank(nd) = maxR;
                    if (ND_ranktype(nd) != SINKRANK)
                        ND_ranktype(nd) = MAXRANK;
                }

                break;
            }
        }
    }
}

/* doSourceRank:
 * Maintain the SOURCE ranking constraint.
 * Can only be used with the Nikolov and Healy algorithm
 */
static void doSourceRank(graph_t * g)
{
    int i;
    int flag = 0;

    for (i = 0; i < nNodeGroups; i++) {
        int j;

        for (j = 0; j < nodeGroups[i].nNodes; j++) {
            /* once we find a SOURCERANK node in a group, make the rank of
             * all the members of the group 0
             */
            if (ND_ranktype(nodeGroups[i].nodes[j]) == SOURCERANK) {
                int k;
                for (k = 0; k < nodeGroups[i].nNodes; k++) {
                    node_t *nd = nodeGroups[i].nodes[(j + k) % nodeGroups[i].nNodes];
                    ND_rank(nd) = 0;
                    ND_ranktype(nd) = SOURCERANK;
                }

                flag = 1;
                break;
            }
        }
    }

    if (!flag)
        return;

    flag = 0;

    /* The SourceRank group might be the only group having rank 0. Check
     * if incrementing the ranks of the other nodes is necessary at all.
     */
    for (i = 0; i < nNodeGroups; i++) {
        if (nodeGroups[i].nNodes > 0
            && ND_ranktype(nodeGroups[i].nodes[0]) != SOURCERANK
            && ND_rank(nodeGroups[i].nodes[0]) == 0) {
            flag = 1;
            break;
        }
    }

    if (!flag)
        return;

    /* Now make all non-SourceRank nodes' rankings nonzero (increment) */
    for (i = 0; i < nNodeGroups; i++) {
        if (nodeGroups[i].nNodes > 0
            && ND_ranktype(nodeGroups[i].nodes[0]) != SOURCERANK) {
            int j;

            for (j = 0; j < nodeGroups[i].nNodes; j++) {
                ND_rank(nodeGroups[i].nodes[j])++;
            }
        }
    }
}

/* doSinkRank:
 * Maintain the SINK ranking constraint.
 * Can only be used with the Nikolov and Healy algorithm
 */
static void doSinkRank(graph_t * g)
{
    int i, max;
    int flag = 0;

    max = getMaxRank(g);

    /* Check if any non-sink node has rank == max */
    for (i = 0; i < nNodeGroups; i++) {
        if (nodeGroups[i].nNodes > 0
            && ND_ranktype(nodeGroups[i].nodes[0]) != SINKRANK
            && ND_rank(nodeGroups[i].nodes[0]) == max) {
            flag = 1;
            break;
        }
    }

    if (!flag)
        return;

    for (i = 0; i < nNodeGroups; i++) {
        int j;

        for (j = 0; j < nodeGroups[i].nNodes; j++) {
            /* once we find a SINKRANK node in a group, make the rank of
             * all the members of the group max+1
             */
            if (ND_ranktype(nodeGroups[i].nodes[j]) == SINKRANK) {
                int k;
                for (k = 0; k < nodeGroups[i].nNodes; k++) {
                    node_t *nd = nodeGroups[i].nodes[(j + k) % nodeGroups[i].nNodes];
                    ND_rank(nd) = max + 1;
                    ND_ranktype(nd) = SINKRANK;
                }

                break;
            }
        }
    }
}

/* rank2:
 * Initial code for ranking (Nikolov-Healy).
 * It's no longer used.
 */
void rank2(graph_t * g)
{
    int currentLayer = 1;
    int nNodes = agnnodes(g);
    int nEdges = agnedges(g);
    int nPinnedNodes = 0, nSelected = 0;
    Agnode_t *n, **UArray;
    int USize = 0;
    int i, prevSize = 0;

    UArray = N_NEW(nEdges * 2, Agnode_t *);

    /* make all pinning values 0 */
    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        ND_pinned(n) = 0;
    }

    while (nPinnedNodes != nNodes) {
        for (nSelected = 0, n = agfstnode(g); n; n = agnxtnode(g, n)) {
            if (ND_pinned(n) == 0) {
                if (allNeighborsAreBelow(n)) {
                    ND_pinned(n) = 1;

                    UArray[USize] = n;
                    USize++;

                    ND_rank(n) = currentLayer;
                    nPinnedNodes++;
                    nSelected++;
                }
            }
        }

        if (nSelected == 0) {	/* no node has been selected */
            currentLayer++;
            for (i = prevSize; i < USize; i++) {
                /* a pinning value of 2 indicates this node is below the
                 * current node under consideration
                 */
                ND_pinned(UArray[i]) = 2;
            }

            prevSize = USize;
        }
    }

    /* Apply Nikolov's node promotion heuristic */
    applyPromotionHeuristic(g);

    /* this is for the sake of the graphviz layer numbering scheme */
    reverseLevelNumbers(g);

    computeNodeGroups(g);	/* groups of UF DS nodes */

    /* modify the ranking to respect the same ranking constraint */
    doSameRank(g);

    /* satisfy min/max ranking constraints */
    doMinRank(g);
    doMaxRank(g);

    /* satisfy source/sink ranking constraints */
    doSourceRank(g);
    doSinkRank(g);

    /* Apply the FFDH algorithm to achieve a better aspect ratio */
    applyPacking(g, 1);		/* achieve an aspect ratio of 1 */
}
#endif

/****************************************************************
 * Initialize all the edge types to NORMAL
 ****************************************************************/
void initEdgeTypes(graph_t * g)
{
    edge_t *e;
    node_t *n;
    int lc;

    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        for (lc = 0; lc < ND_in(n).size; lc++) {
            e = ND_in(n).list[lc];
            ED_edge_type(e) = NORMAL;
        }
    }
}

/* computeCombiAR:
 * Compute and return the combinatorial aspect ratio
 * (used in the ranking phase):
 *   combiAR = width of the widest layer / total height
 */
static double computeCombiAR(graph_t * g)
{
    int i;
    double maxW = 0;
    double maxH;
    double ratio;

    computeLayerWidths(g);
    maxH = (nLayers - 1) * GD_ranksep(g);

    for (i = 0; i < nLayers; i++) {
        double w = layerWidthInfo[i].width
            + layerWidthInfo[i].nDummyNodes * GD_nodesep(g);
        if (maxW < w)
            maxW = w;
        maxH += layerWidthInfo[i].height;
    }

    ratio = maxW / maxH;

    return ratio;
}

#ifdef UNUSED
/* applyExpansion:
 * Heuristic for expanding very narrow graphs by edge reversal.
 * Needs improvement.
 */
void applyExpansion(graph_t * g)
{
    node_t *sink = NULL;
    int i, k;
    edge_t *e;

    computeLayerWidths(g);

    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[i].layerNumber == nLayers / 2) {
            k = i;
            break;
        }
    }

    /* now reverse the edges from the k-th layer nodes to their parents */
    for (i = 0; i < layerWidthInfo[k].nNodeGroupsInLayer; i++) {
        int p;
        nodeGroup_t *ng = layerWidthInfo[k].nodeGroupsInLayer[i];
        for (p = 0; p < ng->nNodes; p++) {
            node_t *nd = ng->nodes[p];

            while ((e = ND_in(nd).list[0])) {
                printf("Reversing edge: %s->%s\n", agnameof(agtail(e)),
                       agnameof(aghead(e)));
                reverse_edge(e);
            }

            int j, l;
            node_t *v3;
            edge_t *e3;
            for (v3 = agfstnode(g); v3; v3 = agnxtnode(g, v3)) {
                for (e3 = agfstout(g, v3); e3; e3 = agnxtout(g, e3)) {
                    if (ND_rank(aghead(e3)) > k && ND_rank(agtail(e3)) < k) {
                        printf("Reversing long edge: %s->%s\n",
                               agnameof(agtail(e3)), agnameof(aghead(e3)));
                        reverse_edge(e3);
                    }
                }
            }

            /*for (l = 0; l < nLayers; l++) {
               if (layerWidthInfo[l].layerNumber <= k)
                   continue;

               for (j = 0; j < layerWidthInfo[l].nNodeGroupsInLayer; j++) {
                   int q;
                   nodeGroup_t *ng2 = layerWidthInfo[l].nodeGroupsInLayer[j];
                   for (q = 0; q < ng2->nNodes; q++) {
                       node_t *nd2 = ng2->nodes[q];
                       edge_t *e2;
                       int s = 0;
                       while (e2 = ND_in(nd2).list[s]) {
                           if (ND_rank(agtail(e2)) < k) {
                               printf("Reversing edge: %s->%s\n",
                                      agnameof(agtail(e2)), agnameof(aghead(e2)));
                               getchar();
                               //reverse_edge(e2);
                           }
                           else s++;
                       }
                   }
               }
               } */

            if (sink == NULL) {
                int brFlag = 1;
                for (l = 0; l < nLayers && brFlag; l++) {
                    for (j = 0; j < layerWidthInfo[l].nNodeGroupsInLayer && brFlag; j++) {
                        int q;
                        nodeGroup_t *ng2 = layerWidthInfo[l].nodeGroupsInLayer[j];
                        for (q = 0; q < ng2->nNodes && brFlag; q++) {
                            node_t *nd2 = ng2->nodes[q];
                            if (ND_in(nd2).size == 0) {
                                sink = nd2;
                                brFlag = 0;
                            }
                        }
                    }
                }
            }

            virtual_edge(nd, /* sink */
                         layerWidthInfo[0].nodeGroupsInLayer[0]->nodes[0],
                         NULL);
        }
    }

    //collapse_sets(g);
}
#endif

/* zapLayers:
 * After applying the expansion heuristic, some layers may
 * end up empty.
 * This function removes the empty layers.
 */
static void zapLayers(graph_t * g)
{
    int i, j;
    int start = 0;
    int count = 0;

    /* the layers are sorted by layer number; now zap the empty layers */

    for (i = 0; i < nLayers; i++) {
        if (layerWidthInfo[i].nNodeGroupsInLayer == 0) {
            if (count == 0)
                start = layerWidthInfo[i].layerNumber;
            count++;
        } else if (count && layerWidthInfo[i].layerNumber > start) {
            for (j = 0; j < layerWidthInfo[i].nNodeGroupsInLayer; j++) {
                int q;
                nodeGroup_t *ng = layerWidthInfo[i].nodeGroupsInLayer[j];
                for (q = 0; q < ng->nNodes; q++) {
                    node_t *nd = ng->nodes[q];
                    ND_rank(nd) -= count;
                }
            }
        }
    }
}

/* rank3:
 * Ranking function for dealing with wide/narrow graphs,
 * or graphs with varying node widths and heights.
 * This function iteratively calls dot's rank1() function and
 * then applies packing (applyPacking2, which in turn calls
 * reduceMaxWidth2 to partition the widest layer).
 * If the iterations argument is -1, rank3 keeps calling
 * applyPacking2 until the combinatorial aspect ratio is
 * <= the desired aspect ratio.
 */
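/* Illustrative walk-through (hypothetical numbers): with targetAR = 2,
 * suppose ranking yields layer widths of 10in, 2in and 2in, so combiAR is
 * well above 2. applyPacking2 then partitions the 10in layer by adding
 * virtual edges that push some of its node groups one rank down; the loop
 * re-ranks and re-measures until combiAR <= targetAR or there is no
 * further improvement.
 */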
void rank3(graph_t * g, aspect_t * asp)
{
    Agnode_t *n;
    int i;
    int iterations = asp->nextIter;
    double lastAR = MAXDOUBLE;

    computeNodeGroups(g);	/* groups of UF DS nodes */

    for (i = 0; (i < iterations) || (iterations == -1); i++) {
        /* initialize all ranks to be 0 */
        for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
            ND_rank(n) = 0;
        }

        /* need to compute the ranking first, via network flow */
        rank1(g);

        asp->combiAR = computeCombiAR(g);
        if (Verbose)
            fprintf(stderr, "combiAR = %lf\n", asp->combiAR);

        /* Uncomment the following code when working with narrow graphs */
#ifdef UNUSED
        if (combiAR < 0.8 * targetAR) {
            char str[20];
            printf("Apply expansion? (y/n):");
            scanf("%s", str);
            if (strcmp(str, "y") == 0)
                applyExpansion(g);
            break;
        } else
#endif
        /* Success, or no improvement */
        if ((asp->combiAR <= asp->targetAR)
            || ((iterations == -1) && (lastAR <= asp->combiAR))) {
            asp->prevIterations = asp->curIterations;
            asp->curIterations = i;

            break;
        }
        lastAR = asp->combiAR;
        /* Apply the FFDH algorithm to reduce the aspect ratio */
        applyPacking2(g);
    }

    /* do network flow once again, incorporating the added edges */
    rank1(g);

    computeLayerWidths(g);
    zapLayers(g);
    asp->combiAR = computeCombiAR(g);
}

#ifdef UNUSED
/* NikolovHealy:
 * Nikolov-Healy approach to ranking.
 * First, use the longest path algorithm.
 * Then use the node promotion heuristic.
 * This function is called by the rank4 function.
 */
static void NikolovHealy(graph_t * g)
{
    int currentLayer = 1;
    int nNodes = agnnodes(g);
    int nEdges = agnedges(g);
    int nPinnedNodes = 0, nSelected = 0;
    Agnode_t *n, **UArray;
    int USize = 0;
    int i, prevSize = 0;

    /************************************************************************
     * longest path algorithm
     ************************************************************************/
    UArray = N_NEW(nEdges * 2, Agnode_t *);

    /* make all pinning values 0 */
    for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
        ND_pinned(n) = 0;
    }

    while (nPinnedNodes != nNodes) {
        for (nSelected = 0, n = agfstnode(g); n; n = agnxtnode(g, n)) {
            if (ND_pinned(n) == 0) {
                if (allNeighborsAreBelow(n)) {
                    ND_pinned(n) = 1;

                    UArray[USize] = n;
                    USize++;

                    ND_rank(n) = currentLayer;
                    nPinnedNodes++;
                    nSelected++;
                }
            }
        }

        if (nSelected == 0) {	/* no node has been selected */
            currentLayer++;
            for (i = prevSize; i < USize; i++) {
                /* a pinning value of 2 indicates this node is below the
                 * current node under consideration
                 */
                ND_pinned(UArray[i]) = 2;
            }

            prevSize = USize;
        }
    }

    /************************************************************************
     * end of longest path algorithm
     ************************************************************************/

    /* Apply the node promotion heuristic */
    applyPromotionHeuristic(g);

    /* this is for the sake of the graphviz layer numbering scheme */
    reverseLevelNumbers(g);
}

/* rank4:
 * This function calls the NikolovHealy function
 * for ranking in the Nikolov-Healy approach.
 */
void rank4(graph_t * g, int iterations)
{
    Agnode_t *n;
    int i;
    int it;

    printf("# of iterations of packing: ");
    scanf("%d", &it);
    printf("it=%d\n", it);

    computeNodeGroups(g);	/* groups of UF DS nodes */

    for (i = 0; i < it; i++) {
        for (n = agfstnode(g); n; n = agnxtnode(g, n)) {
            ND_rank(n) = 0;
        }

        NikolovHealy(g);

        combiAR = computeCombiAR(g);
        printf("%d. combiAR = %lf\n", i, combiAR);

        /*
           //modify the ranking to respect the same ranking constraint
           doSameRank(g);

           //satisfy min ranking constraints
           doMinRank(g);
           doMaxRank(g);

           //satisfy source ranking constraints
           doSourceRank(g);
           doSinkRank(g);
         */

        /* Apply the FFDH algorithm to achieve a better aspect ratio */
        applyPacking4(g);
    }
}
#endif

/* init_UF_size:
 * Initialize the Union Find data structure
 */
void init_UF_size(graph_t * g)
{
    node_t *n;

    for (n = agfstnode(g); n; n = agnxtnode(g, n))
        ND_UF_size(n) = 0;
}

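/* setAspect:
 * Parse the "aspect" attribute, which (per the sscanf format below) has
 * the form aspect="ratio" or aspect="ratio,passes", e.g. aspect="2,5":
 * the target ratio is clamped to [MIN_AR, MAX_AR] and passes defaults to
 * DEF_PASSES. At present the attribute is ignored (see the warning below)
 * and NULL is always returned.
 */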
aspect_t*
setAspect (Agraph_t * g, aspect_t* adata)
{
    double rv;
    char *p;
    int r, passes = DEF_PASSES;

    p = agget (g, "aspect");

    if (!p || ((r = sscanf (p, "%lf,%d", &rv, &passes)) <= 0)) {
        adata->nextIter = 0;
        adata->badGraph = 0;
        return NULL;
    }
    agerr (AGWARN, "the aspect attribute has been disabled due to implementation flaws - attribute ignored.\n");
    adata->nextIter = 0;
    adata->badGraph = 0;
    return NULL;

    /* NOTE: the code below is currently unreachable; it is retained for
     * when the aspect feature is re-enabled.
     */
    if (rv < MIN_AR) rv = MIN_AR;
    else if (rv > MAX_AR) rv = MAX_AR;
    adata->targetAR = rv;
    adata->nextIter = -1;
    adata->nPasses = passes;
    adata->badGraph = 0;

    if (Verbose)
        fprintf(stderr, "Target AR = %g\n", adata->targetAR);

    return adata;
}