-#error TODO: Support windowschuds
+#warning TODO: Support windowschuds
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
#include "../structs.h"
+worker_arg warg;
+int waiting_workers;
+int should_quit = 0;
+
+extern FVec2 ** grid;
+extern FVec2 ** grid_prime;
+extern FVec2 ** grid_temp;
+extern IVec2 grid_size;
+
+pthread_t* threads;
+pthread_mutex_t mutex;
+pthread_barrier_t barrier;
+
+/* TODO: This should go in a header, platforms don't care. */
+float rd_a_prime(FVec2 **source_grid, RD_Opts opts, int x, int y, Mat3 kernel, float A, float B);
+float rd_b_prime(FVec2** source_grid, RD_Opts opts, int x, int y, Mat3 kernel, float A, float B);
+
void* iterator(void* _arg)
{
    /* Worker thread body: iterates this worker's sub-rectangle of the
     * reaction-diffusion grid for max_iterations steps, meeting the other
     * workers at a barrier after every step so no thread races ahead.
     * _arg points at this worker's worker_arg and is returned unchanged. */
    worker_arg* warg = (worker_arg*)_arg;  /* shadows the file-scope warg on purpose */
    RD_Opts opts = warg->opts;
    int start_x = warg->start_x;
    int start_y = warg->start_y;
    int w = warg->width;
    int h = warg->height;

    for (warg->iterations = 0; warg->iterations < warg->max_iterations; warg->iterations++)
    {
        /* Clamp against grid_size so the overlap columns added in start()
         * never index past the end of the grid. */
        for (int x = start_x; x < w + start_x && x < grid_size.x; x++)
        {
            for (int y = start_y; y < h + start_y && y < grid_size.y; y++)
            {
                FVec2 each = grid[x][y];
                if (each.c >= 0.5f)
                {
                    /* NOTE(review): `each` is a local copy, so these two
                     * writes never reach grid[x][y]; they only change the B
                     * value fed into the kernels below, and the c decay is
                     * lost every step. Confirm this is intended. */
                    each.b = 1.0f;
                    each.c -= 5.0f/((float)(opts.max_iterations/100.0f));
                }
                grid_prime[x][y].a = rd_a_prime(grid, opts, x, y, laplacian_kernel, each.a, each.b);
                grid_prime[x][y].b = rd_b_prime(grid, opts, x, y, laplacian_kernel, each.a, each.b);
            }
        }

        /* The last worker to arrive swaps the read grid and the write grid
         * under the mutex; the barrier then releases everyone into the next
         * step, so no thread reads a half-swapped pair of pointers. */
        pthread_mutex_lock(&mutex);
        if (++waiting_workers == warg->worker_count)
        {
            grid_temp = grid;
            grid = grid_prime;
            grid_prime = grid_temp;
            waiting_workers = 0;
            /* Progress in [0, 1); presumably declared in structs.h — not
             * visible in this unit, confirm. */
            resource_generation_progress = ((float)warg->iterations/(float)warg->max_iterations);
        }
        pthread_mutex_unlock(&mutex);
        pthread_barrier_wait(&barrier);
    }

    /* One last synchronization so should_quit is not published before every
     * worker has finished its final step. NOTE(review): the barrier was
     * initialized with worker_count (workers only), so the boss thread does
     * not actually wait here — it presumably polls should_quit, which is a
     * plain int written without synchronization; confirm against the caller. */
    pthread_barrier_wait(&barrier);
    should_quit = 1;

    return _arg;
}
void start(int worker_count, RD_Opts active_opt)
{
    /* Launch worker_count iterator() threads, each owning a vertical slice
     * of the grid. Fills in the file-scope warg: cleanup() reads
     * warg.worker_count to know how many threads to join. (The original
     * declared a local `warg` that shadowed the global, so cleanup() always
     * saw worker_count == 0 and joined nothing.) */
    warg = (worker_arg){
        active_opt, grid, grid_prime,
        0, 0, (grid_size.x), (grid_size.y),
        .worker_count = worker_count,
        .max_iterations = active_opt.max_iterations
    };

    threads = malloc(sizeof *threads * worker_count);

    /* Per-thread argument structs. These must outlive start(): the original
     * used a stack VLA here, which the workers kept dereferencing after
     * start() returned — use-after-scope UB. Heap storage is intentionally
     * left allocated for the lifetime of the run. */
    worker_arg *wargs = malloc(sizeof *wargs * worker_count);
    if (threads == NULL || wargs == NULL)
    {
        fprintf(stderr, "start: out of memory\n");
        exit(EXIT_FAILURE);
    }

    pthread_mutex_init(&mutex, NULL);
    /* Count is workers only — the boss thread never waits at this barrier. */
    pthread_barrier_init(&barrier, NULL, warg.worker_count);

    for (int t = 0; t < warg.worker_count; t++)
    {
        wargs[t] = warg;
        wargs[t].worker_id = t;
        /* Every slice except the last gets 4 extra columns of overlap so the
         * kernel has neighbours at the seam; iterator() clamps to grid_size. */
        wargs[t].width = (grid_size.x/worker_count) + ((t == worker_count-1) ? 0 : 4);
        wargs[t].start_x = (wargs[t].width * t);
        pthread_create(&threads[t], NULL, iterator, &wargs[t]);
    }
}
+/* TODO: Revisit this for any new additions. */
int cleanup()
-{}
+{
+ for (int t = 0; t < warg.worker_count; t++)
+ {
+ pthread_join(threads[t], NULL);
+ }
+
+ /* TODO: Actually this probably shouldn't be freeing resources allocated outside of the unit. */
+ free(grid);
+ free(grid_prime);
+ free(threads);
+
+ return 0;
+}
+