From 989c3e973f18e6533da414384ef0add95c692b89 Mon Sep 17 00:00:00 2001
From: David Phillips
Date: Thu, 5 May 2016 23:46:02 +1200
Subject: Invert load sharing for thread vs cluster node

Rows are divided between cluster nodes and columns between threads.
This is a major change for anyone using this in a cluster, but it was
done to enable the frame interlacer to use buffered reads and writes.
---
 algorithms/burning-ship.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'algorithms/burning-ship.c')

diff --git a/algorithms/burning-ship.c b/algorithms/burning-ship.c
index 5f14c1d..6aea927 100644
--- a/algorithms/burning-ship.c
+++ b/algorithms/burning-ship.c
@@ -38,12 +38,12 @@ void *generate_burning_ship_section(void *section)
 	double left = -2.2f;
 
 	/* FIXME document this */
-	b = (d->core*(size_units/size)+top);
+	b = clust_id*(size_units/size)+top;

 	/* FIXME document this */
-	for (y = d->core; y < size; y += cores)
+	for (y = clust_id; y < size; y += clust_total)
 	{
-		a = clust_id*(size_units/size)+left; /* FIXME document this */
-		for (x = clust_id; x < size; x+=clust_total)
+		a = d->core*(size_units/size)+left;
+		for (x = d->core; x < size; x += cores)
 		{
 			z = 0;
 			c = a+I*b;
@@ -55,9 +55,9 @@ void *generate_burning_ship_section(void *section)
 				z = cpow( fabs(creal(z)) + I*fabs(cimag(z)) , power) + c;
 			}
 			d->data[d->idx++] = (255*i)/iterat;
-			a += (clust_total*size_units)/size;
+			a += cores*(size_units/size);
 		}
-		b += (cores*size_units)/size;
+		b += clust_total*(size_units/size);
 	}
 	return NULL;
 }
--
cgit v1.1
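
Editor's note: the standalone C sketch below illustrates the work split this
commit settles on, namely rows strided by cluster node and columns strided by
thread. The struct section fields and the clust_id/clust_total parameters
mirror the identifiers seen in the patch, but the surrounding types, setup,
and the walk_section() helper are assumptions made for illustration, not the
repository's actual code.

#include <stdio.h>

/* Hypothetical per-thread work descriptor: the real struct used by
 * generate_burning_ship_section() is not shown in this patch, so these
 * fields are assumptions for illustration only. */
struct section {
	int core;   /* this thread's index on the local node */
	int cores;  /* number of worker threads on the node  */
};

/* Visit the pixels this (node, thread) pair is responsible for under the
 * scheme introduced by the commit: rows (y) are interleaved across cluster
 * nodes, columns (x) are interleaved across threads.  Every pixel of a
 * size x size image is claimed by exactly one (node, thread) pair. */
static void walk_section(const struct section *d,
                         int clust_id, int clust_total, int size)
{
	for (int y = clust_id; y < size; y += clust_total)     /* node -> rows   */
		for (int x = d->core; x < size; x += d->cores) /* thread -> cols */
			printf("node %d, thread %d -> pixel (%d, %d)\n",
			       clust_id, d->core, x, y);
}

int main(void)
{
	/* Toy run: 2 cluster nodes, 2 threads per node, 4x4 image. */
	for (int node = 0; node < 2; node++) {
		for (int t = 0; t < 2; t++) {
			struct section d = { .core = t, .cores = 2 };
			walk_section(&d, node, 2, 4);
		}
	}
	return 0;
}

Because each cluster node now owns whole (interlaced) rows rather than
scattered columns, the frame interlacer can, as the commit message notes,
merge per-node output with buffered sequential reads and writes instead of
touching individual pixels of every row.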