From 2d028e5b8df211068bbe96a30a404ad67ab6aea8 Mon Sep 17 00:00:00 2001
From: Randy McShandy
Date: Sun, 28 Apr 2024 18:38:05 -0500
Subject: [PATCH] Reorganize files and code for basic multiplatform support

---
 CMakeLists.txt                             |  14 +-
 main.c => src/main.c                       |   0
 src/platform.h                             |  10 ++
 src/platforms/platform_posix.c             | 105 ++++++++++++++
 src/platforms/platform_win.c               |  12 ++
 stb_image_write.h => src/stb_image_write.h |   0
 structs.c => src/structs.c                 |   0
 structs.h => src/structs.h                 |   2 -
 utils.c => src/utils.c                     | 154 +++------------------
 9 files changed, 156 insertions(+), 141 deletions(-)
 rename main.c => src/main.c (100%)
 create mode 100644 src/platform.h
 create mode 100644 src/platforms/platform_posix.c
 create mode 100644 src/platforms/platform_win.c
 rename stb_image_write.h => src/stb_image_write.h (100%)
 rename structs.c => src/structs.c (100%)
 rename structs.h => src/structs.h (97%)
 rename utils.c => src/utils.c (55%)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index e1aa6ad..c56b104 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,8 +4,20 @@ project(
   LANGUAGES C
 )
 
+set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
 set(CMAKE_C_COMPILER gcc)
-FILE(GLOB POSIX_SRC *.c)
+
+FILE(GLOB POSIX_SRC src/*.c src/platforms/platform_posix.c)
+FILE(GLOB WINDOWS_SRC src/*.c src/platforms/platform_win.c)
+
 add_executable(posix_BC ${POSIX_SRC})
+add_executable(windows_BC ${WINDOWS_SRC})
+
 target_compile_definitions(posix_BC PUBLIC POSIX_BC=1)
+target_compile_definitions(windows_BC PUBLIC WINDOWS_BC=1)
+
+target_compile_options(posix_BC PUBLIC -Wall)
+
 target_link_libraries(posix_BC -lm -lpthread)
+target_link_libraries(windows_BC -lm -lpthread)
+
diff --git a/main.c b/src/main.c
similarity index 100%
rename from main.c
rename to src/main.c
diff --git a/src/platform.h b/src/platform.h
new file mode 100644
index 0000000..caf46c9
--- /dev/null
+++ b/src/platform.h
@@ -0,0 +1,10 @@
+#ifndef __RD_PLATFORM__
+#define __RD_PLATFORM__
+
+#include "structs.h"
+
+void* iterator(void* _arg);
+void start(int worker_count, RD_Opts active_opt);
+int cleanup();
+
+#endif /* __RD_PLATFORM__ */
diff --git a/src/platforms/platform_posix.c b/src/platforms/platform_posix.c
new file mode 100644
index 0000000..799e837
--- /dev/null
+++ b/src/platforms/platform_posix.c
@@ -0,0 +1,105 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "../structs.h"
+
+worker_arg warg;
+worker_arg* wargs;
+int waiting_workers;
+int should_quit = 0;
+
+extern FVec2 ** grid;
+extern FVec2 ** grid_prime;
+extern FVec2 ** grid_temp;
+extern IVec2 grid_size;
+
+pthread_t* threads;
+pthread_mutex_t mutex;
+pthread_barrier_t barrier;
+
+float rd_a_prime(FVec2 **source_grid, RD_Opts opts, int x, int y, Mat3 kernel, float A, float B);
+float rd_b_prime(FVec2** source_grid, RD_Opts opts, int x, int y, Mat3 kernel, float A, float B);
+
+void* iterator(void* _arg)
+{
+  worker_arg* warg = (worker_arg*)_arg;
+  RD_Opts opts = warg->opts;
+  int start_x = warg->start_x;
+  int start_y = warg->start_y;
+  int w = warg->width;
+  int h = warg->height;
+
+  for (warg->iterations = 0; warg->iterations < warg->max_iterations; warg->iterations++)
+  {
+    for (int x = start_x; x < w + start_x && x < grid_size.x; x++)
+    {
+      for (int y = start_y; y < h + start_y && y < grid_size.y; y++)
+      {
+        FVec2 each = grid[x][y];
+        if (each.c >= 0.5f)
+        {
+          each.b = 1.0f;
+          each.c -= 5.0f/((float)(opts.max_iterations/100.0f));
+        }
+        grid_prime[x][y].a = rd_a_prime(grid, opts, x, y, laplacian_kernel, each.a, each.b);
+        grid_prime[x][y].b = rd_b_prime(grid, opts, x, y, laplacian_kernel, each.a, each.b);
+      }
+    }
+
+    pthread_mutex_lock(&mutex);
+    if (++waiting_workers == warg->worker_count)
+    {
+      grid_temp = grid;
+      grid = grid_prime;
+      grid_prime = grid_temp;
+      waiting_workers = 0;
+    }
+    pthread_mutex_unlock(&mutex);
+    pthread_barrier_wait(&barrier);
+
+  }
+
+  // One last synchronization so the boss thread doesn't die early
+  pthread_barrier_wait(&barrier);
+  should_quit = 1;
+
+  return _arg;
+}
+
+void start(int worker_count, RD_Opts active_opt)
+{
+  /* Fill in the shared template; cleanup() reads warg.worker_count later,
+   * so assign the global rather than shadowing it with a local. */
+  warg = (worker_arg){
+    active_opt, grid, grid_prime,
+    0, 0, (grid_size.x), (grid_size.y),
+    .worker_count = worker_count,
+    .max_iterations = active_opt.max_iterations
+  };
+
+  threads = (pthread_t*)malloc(sizeof(pthread_t) * worker_count);
+  /* Per-worker args live on the heap so the pointers handed to the workers
+   * stay valid after start() returns. */
+  wargs = (worker_arg*)malloc(sizeof(worker_arg) * worker_count);
+  pthread_mutex_init(&mutex, NULL);
+  pthread_barrier_init(&barrier, NULL, warg.worker_count);
+
+  for (int t = 0; t < warg.worker_count; t++)
+  {
+    wargs[t] = warg;
+    wargs[t].worker_id = t;
+    wargs[t].width = (grid_size.x/worker_count) + ((t == worker_count-1) ? 0 : 4);
+    wargs[t].start_x = (wargs[t].width * t);
+    pthread_create(&threads[t], NULL, iterator, &wargs[t]);
+  }
+}
+
+int cleanup()
+{
+  for (int t = 0; t < warg.worker_count; t++)
+  {
+    pthread_join(threads[t], NULL);
+  }
+
+  pthread_barrier_destroy(&barrier);
+  pthread_mutex_destroy(&mutex);
+
+  free(grid);
+  free(grid_prime);
+  free(threads);
+  free(wargs);
+
+  return 0;
+}
diff --git a/src/platforms/platform_win.c b/src/platforms/platform_win.c
new file mode 100644
index 0000000..4fc7854
--- /dev/null
+++ b/src/platforms/platform_win.c
@@ -0,0 +1,12 @@
+#error TODO: Support Windows
+
+#include "../structs.h"
+
+void* iterator(void* _arg)
+{}
+
+void start(int worker_count, RD_Opts active_opt)
+{}
+
+int cleanup()
+{}
diff --git a/stb_image_write.h b/src/stb_image_write.h
similarity index 100%
rename from stb_image_write.h
rename to src/stb_image_write.h
diff --git a/structs.c b/src/structs.c
similarity index 100%
rename from structs.c
rename to src/structs.c
diff --git a/structs.h b/src/structs.h
similarity index 97%
rename from structs.h
rename to src/structs.h
index a239fd0..2c60775 100644
--- a/structs.h
+++ b/src/structs.h
@@ -1,8 +1,6 @@
 #ifndef __RD_STRUCTS__
 #define __RD_STRUCTS__
 
-#include <pthread.h>
-
 #define GRID_X 128
 #define GRID_Y GRID_X
 
diff --git a/utils.c b/src/utils.c
similarity index 55%
rename from utils.c
rename to src/utils.c
index c898a4b..17cdec7 100644
--- a/utils.c
+++ b/src/utils.c
@@ -3,7 +3,12 @@
 #include <stdlib.h>
 #include <math.h>
 #include <time.h>
 
+#warning TODO: I think this header is only posix, check for alt platform replacements
+#include <unistd.h>
+
 #include "structs.h"
+#include "platform.h"
+
 #define STB_IMAGE_WRITE_IMPLEMENTATION
 #include "stb_image_write.h"
@@ -12,14 +17,7 @@
 #define printf(a,...)
 #endif
 
-worker_arg warg;
-int waiting_workers = 0;
-int should_quit = 0;
-
-pthread_t* threads;
-pthread_mutex_t mutex;
-pthread_barrier_t barrier;
-
+extern int should_quit;
 FVec2 ** grid;
 FVec2 ** grid_prime;
 FVec2 ** grid_temp;
@@ -38,8 +36,7 @@ float kernel_sum(Mat3 kernel, Mat3 source) {
   return result;
 }
 
-float rd_a_prime(FVec2 **source_grid, RD_Opts opts, int x, int y, Mat3 kernel,
-    float A, float B) {
+float rd_a_prime(FVec2 **source_grid, RD_Opts opts, int x, int y, Mat3 kernel, float A, float B) {
   float a_prime = 1.0f;
   int x_less = (x - 1 < 0) ? x : x - 1;
   int y_less = (y - 1 < 0) ? y : y - 1;
@@ -85,55 +82,6 @@ float rd_b_prime(FVec2** source_grid, RD_Opts opts, int x, int y, Mat3 kernel, f
   return b_prime;
 }
 
-void* iterator(void* _arg)
-{
-  worker_arg* warg = (worker_arg*)_arg;
-  RD_Opts opts = warg->opts;
-  int start_x = warg->start_x;
-  int start_y = warg->start_y;
-  int w = warg->width;
-  int h = warg->height;
-
-  for (warg->iterations = 0; warg->iterations < warg->max_iterations; warg->iterations++)
-  {
-    printf("worker %d: work unit %d/%d\n", warg->worker_id, warg->iterations, warg->max_iterations);
-    for (int x = start_x; x < w + start_x && x < grid_size.x; x++)
-    {
-      for (int y = start_y; y < h + start_y && y < grid_size.y; y++)
-      {
-        FVec2 each = grid[x][y];
-        if (each.c >= 0.5f)
-        {
-          each.b = 1.0f;
-          each.c -= 5.0f/((float)(opts.max_iterations/100.0f));
-        }
-        grid_prime[x][y].a = rd_a_prime(grid, opts, x, y, laplacian_kernel, each.a, each.b);
-        grid_prime[x][y].b = rd_b_prime(grid, opts, x, y, laplacian_kernel, each.a, each.b);
-      }
-    }
-
-    pthread_mutex_lock(&mutex);
-    if (++waiting_workers == warg->worker_count)
-    {
-      grid_temp = grid;
-      grid = grid_prime;
-      grid_prime = grid_temp;
-      waiting_workers = 0;
-      printf("worker-boss %d: completing report\n", warg->worker_id);
-    }
-    pthread_mutex_unlock(&mutex);
-    pthread_barrier_wait(&barrier);
-
-  }
-
-  // One last synchronization so boss thread doesn't die early
-  pthread_barrier_wait(&barrier);
-  should_quit = 1;
-
-  printf("worker %d: exiting\n", warg->worker_id);
-  return _arg;
-}
-
 int initialize(int worker_count, RD_Opts active_opt)
 {
   grid_prime = (FVec2**)malloc(grid_size.x * sizeof(FVec2*));
@@ -144,9 +92,11 @@ int initialize(int worker_count, RD_Opts active_opt)
 
   srand(time(NULL));
 
+  /* Set up as a circle */
   float radius = grid_size.x/2.1f;
   int center_x = grid_size.x/2;
   int center_y = grid_size.y/2;
+
   for (int x = 0; x < grid_size.x; x++)
   {
     for (int y = 0; y < grid_size.y; y++)
@@ -154,53 +104,17 @@ int initialize(int worker_count, RD_Opts active_opt)
       grid[x][y].a = 1.0f;
       grid[x][y].b = 0.0f;
 
-      switch (active_opt.shape)
+      if ((sqrtf(((x-center_x)*(x-center_x))+((y-center_y)*(y-center_y))) < radius))
       {
-        case eCircle:
-        {
-          if ((sqrtf(((x-center_x)*(x-center_x))+((y-center_y)*(y-center_y))) < radius))
-          {
-            grid[x][y].c = 0.0f;
-          }
-          else {
-            grid[x][y].c = 1.0f;
-          }
-        } break;
-
-        case eSquare:
-        default:
-        {
-          grid[x][y].c = 0.0f;
-        } break;
-
-        case eBarrow:
-        {
-          if ((sqrtf(((x-center_x)*(x-center_x))+((y-center_y)*(y-center_y))) < radius))
-          {
-            grid[x][y].c = 0.0f;
-          }
-          else {
-            grid[x][y].c = 1.0f;
-          }
-
-          // Slap in an entrance or something here
-          // or add support for post-processing
-          if (abs(x-center_x) < 3 && (y <= (5 * (grid_size.y/8))))
-          {
-            grid[x][y].c = 1.0f;
-          }
-
-          if (abs(y-center_y) < 8 && (x <= (6 * (grid_size.x/8))) && (x >= (2 * (grid_size.x/8))))
-          {
-            grid[x][y].c = 1.0f;
-          }
-
-        } break;
+        grid[x][y].c = 0.0f;
+      }
+      else {
+        grid[x][y].c = 1.0f;
       }
     }
   }
 
-  /* Big seeds good for rooms */
+  /* Set up seed-points to create connected rooms */
   const int seed_count = sqrt(grid_size.x) * 2;
   const int width = 12 * (grid_size.x/128);
   const int height = width * 1;
@@ -223,43 +137,6 @@ int initialize(int worker_count, RD_Opts active_opt)
     }
   }
 
-  warg = (worker_arg){
-    active_opt, grid, grid_prime,
-    0, 0, (grid_size.x), (grid_size.y),
-    .worker_count = worker_count,
-    .max_iterations = active_opt.max_iterations
-  };
-
-  threads = (pthread_t*)malloc(sizeof(pthread_t) * worker_count);
-  pthread_mutex_init(&mutex, NULL);
-  pthread_barrier_init(&barrier, NULL, warg.worker_count);
-
-  worker_arg wargs[worker_count];
-
-  for (int t = 0; t < warg.worker_count; t++)
-  {
-    wargs[t] = warg;
-    wargs[t].worker_id = t;
-    wargs[t].width = (grid_size.x/worker_count) + ((t == worker_count-1) ? 0 : 4);
-    wargs[t].start_x = (wargs[t].width * t);
-    printf("worker %d x_span %d, %d\n", t, wargs[t].start_x, wargs[t].width);
-    pthread_create(&threads[t], NULL, iterator, &wargs[t]);
-  }
-  return 0;
-}
-
-int cleanup()
-{
-  printf("boss: exiting loop\n");
-  for (int t = 0; t < warg.worker_count; t++)
-  {
-    pthread_join(threads[t], NULL);
-  }
-
-  free(grid);
-  free(grid_prime);
-  free(threads);
-
   return 0;
 }
 
@@ -269,6 +146,7 @@ int generate_rd(int worker_count, RD_Opts active_opt, FVec2 **grid_buffer, IVec2
   grid_size = pgrid_size;
 
   initialize(worker_count, active_opt);
+  start(worker_count, active_opt);
 
   while(!should_quit)
   {}
-- 
2.49.0
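
For the platform_win.c stub the patch leaves as a TODO, the same worker pool maps fairly directly onto Win32 primitives. The code below is a rough sketch, not part of the patch: it assumes the same globals (grid, grid_prime, grid_temp, grid_size) and the same worker_arg fields used by the POSIX file, plus Windows 8 or newer for the synchronization-barrier API; the iterator_thread wrapper name is illustrative, not something from the repo.

/* Sketch only, not in the patch: a possible Win32 counterpart of platform_posix.c. */
#define _WIN32_WINNT 0x0602     /* Windows 8+, needed for SYNCHRONIZATION_BARRIER */
#include <windows.h>
#include <stdlib.h>
#include "../structs.h"

worker_arg warg;
worker_arg* wargs;
int waiting_workers;
int should_quit = 0;

extern FVec2 ** grid;
extern FVec2 ** grid_prime;
extern FVec2 ** grid_temp;
extern IVec2 grid_size;

static HANDLE* threads;
static CRITICAL_SECTION mutex;            /* stands in for pthread_mutex_t */
static SYNCHRONIZATION_BARRIER barrier;   /* stands in for pthread_barrier_t */

void* iterator(void* _arg)
{
  worker_arg* w = (worker_arg*)_arg;

  for (w->iterations = 0; w->iterations < w->max_iterations; w->iterations++)
  {
    /* ...same per-worker grid update loop as platform_posix.c... */

    EnterCriticalSection(&mutex);
    if (++waiting_workers == w->worker_count)
    {
      /* Last worker of the round swaps the read and write grids */
      grid_temp = grid;
      grid = grid_prime;
      grid_prime = grid_temp;
      waiting_workers = 0;
    }
    LeaveCriticalSection(&mutex);
    EnterSynchronizationBarrier(&barrier, 0);
  }

  EnterSynchronizationBarrier(&barrier, 0);
  should_quit = 1;
  return _arg;
}

/* CreateThread expects a DWORD WINAPI entry point, so wrap iterator(). */
static DWORD WINAPI iterator_thread(LPVOID param)
{
  iterator(param);
  return 0;
}

void start(int worker_count, RD_Opts active_opt)
{
  warg = (worker_arg){
    active_opt, grid, grid_prime,
    0, 0, (grid_size.x), (grid_size.y),
    .worker_count = worker_count,
    .max_iterations = active_opt.max_iterations
  };

  threads = (HANDLE*)malloc(sizeof(HANDLE) * worker_count);
  wargs = (worker_arg*)malloc(sizeof(worker_arg) * worker_count);
  InitializeCriticalSection(&mutex);
  InitializeSynchronizationBarrier(&barrier, worker_count, -1);

  for (int t = 0; t < worker_count; t++)
  {
    wargs[t] = warg;
    wargs[t].worker_id = t;
    wargs[t].width = (grid_size.x/worker_count) + ((t == worker_count-1) ? 0 : 4);
    wargs[t].start_x = (wargs[t].width * t);
    threads[t] = CreateThread(NULL, 0, iterator_thread, &wargs[t], 0, NULL);
  }
}

int cleanup()
{
  /* WaitForMultipleObjects is capped at 64 handles; fine for a few workers. */
  WaitForMultipleObjects((DWORD)warg.worker_count, threads, TRUE, INFINITE);
  for (int t = 0; t < warg.worker_count; t++)
  {
    CloseHandle(threads[t]);
  }
  DeleteCriticalSection(&mutex);
  DeleteSynchronizationBarrier(&barrier);

  free(grid);
  free(grid_prime);
  free(threads);
  free(wargs);

  return 0;
}

Keeping iterator()'s pthread-style void* signature and wrapping it for CreateThread would leave platform.h unchanged, so utils.c and main.c would not need to know which platform file was linked in.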