refresh 2 graphs

This commit is contained in:
2026-04-28 15:42:35 +07:00
parent d2f31b91bf
commit 6b28730549
5 changed files with 400136 additions and 167 deletions
+41 -31
View File
@@ -8,7 +8,6 @@
#include <unistd.h>
#include <sstream>
// ================= СТРУКТУРЫ =================
constexpr int NIL = -1;
struct Node {
@@ -25,36 +24,34 @@ struct Args {
Node* pool;
int head;
int depth;
int size;
int threshold;
SortResult* res_out;
};
// ================= GLOBAL STATE =================
// Count of currently spawned worker threads; guarded by counter_mutex.
int active_threads = 0;
// Upper bound on concurrent worker threads; overridable via argv[2] in main().
int max_threads = 4;
pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;  // protects active_threads
pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;      // serializes log output
// When false, log_event() returns immediately; main() enables it for small n only.
bool enable_logging = false;
// Высокоточное логирование для exporter.py
void log_event(const std::string& type, int head, int depth) {
if (!enable_logging) return;
pthread_mutex_lock(&log_mutex);
// ================= УТИЛИТЫ =================
// Current wall-clock time rendered as "seconds.microseconds",
// with the fractional part zero-padded to exactly six digits.
std::string now() {
    timeval t{};
    gettimeofday(&t, nullptr);
    std::ostringstream out;
    out << t.tv_sec << '.' << std::setw(6) << std::setfill('0') << t.tv_usec;
    return out.str();
}
// Log format is adapted for the analyze_log.py script.
// Emits one line per event, serialized by log_mutex so concurrent
// threads never interleave output.
void log_event(const std::string& type, int head, int depth) {
    pthread_mutex_lock(&log_mutex);
    // Timestamp computed per event; was previously a broken file-scope
    // initializer referencing a local `tv` (diff residue).
    timeval tv{};
    gettimeofday(&tv, nullptr);
    const double current_time = tv.tv_sec + tv.tv_usec * 1e-6;
    // head doubles as the range identifier expected by the log parser.
    std::cout << type << " TID=" << pthread_self()
              << " depth=" << depth
              << " range=[" << head << "," << head << "]"
              << " time=" << std::fixed << std::setprecision(6) << current_time << std::endl;
    pthread_mutex_unlock(&log_mutex);
}
// ================= ЛОГИКА СПИСКА =================
void split_list(Node* pool, int head, int& left, int& right) {
if (head == NIL || pool[head].next == NIL) {
left = head; right = NIL; return;
@@ -88,16 +85,15 @@ SortResult merge_lists(Node* pool, int l, int r) {
return {res_head, comps};
}
// ================= РЕКУРСИЯ =================
SortResult parallel_list_sort(Node* pool, int head, int depth);
SortResult parallel_list_sort(Node* pool, int head, int depth, int size, int threshold);
// pthread entry point: unpacks the heap-allocated Args and runs the
// recursive sort on this thread's sublist, storing the result through
// res_out for the spawning thread to read after pthread_join.
void* thread_func(void* arg) {
    Args* a = (Args*)arg;
    // The stale 3-argument call (diff residue) is removed: it matched an
    // overload that is declared but never defined (link error), and its
    // result was immediately overwritten by the 5-argument call anyway.
    *(a->res_out) = parallel_list_sort(a->pool, a->head, a->depth, a->size, a->threshold);
    return nullptr;
}
SortResult parallel_list_sort(Node* pool, int head, int depth) {
SortResult parallel_list_sort(Node* pool, int head, int depth, int size, int threshold) {
log_event("START", head, depth);
if (head == NIL || pool[head].next == NIL) {
log_event("END", head, depth);
@@ -106,23 +102,25 @@ SortResult parallel_list_sort(Node* pool, int head, int depth) {
int left_p, right_p;
split_list(pool, head, left_p, right_p);
int new_size = size / 2;
pthread_t tid;
bool spawned = false;
SortResult res_right = {NIL, 0};
pthread_mutex_lock(&counter_mutex);
if (active_threads < max_threads) {
active_threads++;
spawned = true;
if (size > threshold) {
pthread_mutex_lock(&counter_mutex);
if (active_threads < max_threads) {
active_threads++;
spawned = true;
}
pthread_mutex_unlock(&counter_mutex);
}
pthread_mutex_unlock(&counter_mutex);
if (spawned) {
Args* args = new Args{pool, right_p, depth + 1, &res_right};
Args* args = new Args{pool, right_p, depth + 1, new_size, threshold, &res_right};
pthread_create(&tid, nullptr, thread_func, args);
SortResult res_left = parallel_list_sort(pool, left_p, depth + 1);
SortResult res_left = parallel_list_sort(pool, left_p, depth + 1, new_size, threshold);
pthread_join(tid, nullptr);
pthread_mutex_lock(&counter_mutex);
@@ -134,8 +132,8 @@ SortResult parallel_list_sort(Node* pool, int head, int depth) {
log_event("END", m.head, depth);
return {m.head, res_left.comparisons + res_right.comparisons + m.comparisons};
} else {
SortResult res_left = parallel_list_sort(pool, left_p, depth + 1);
SortResult res_r_seq = parallel_list_sort(pool, right_p, depth + 1);
SortResult res_left = parallel_list_sort(pool, left_p, depth + 1, new_size, threshold);
SortResult res_r_seq = parallel_list_sort(pool, right_p, depth + 1, new_size, threshold);
SortResult m = merge_lists(pool, res_left.head, res_r_seq.head);
log_event("END", m.head, depth);
return {m.head, res_left.comparisons + res_r_seq.comparisons + m.comparisons};
@@ -143,17 +141,29 @@ SortResult parallel_list_sort(Node* pool, int head, int depth) {
}
// Usage: ./sort [n] [max_threads] [threshold]
//   n           — number of list nodes (default 100000)
//   max_threads — global cap on spawned worker threads (default 4)
//   threshold   — minimum sublist size worth spawning a thread for (default 5000)
// Prints a machine-readable STAT line on stderr for the benchmark harness.
int main(int argc, char* argv[]) {
    // Diff residue removed: n, rng were each declared twice (redefinition
    // errors) and the old 3-argument sort call shadowed the timed one.
    int n = 100000;
    int threshold = 5000;
    if (argc >= 2) n = std::atoi(argv[1]);
    if (argc >= 3) max_threads = std::atoi(argv[2]);
    if (argc >= 4) threshold = std::atoi(argv[3]);
    // Enable per-event logging only for small N (log volume grows with n).
    if (n < 5000) enable_logging = true;
    std::vector<Node> pool(n);
    // Fixed seed so benchmark runs are reproducible across invocations.
    std::mt19937 rng(1337);
    for (int i = 0; i < n; i++) {
        pool[i].value = rng() % 100000;
        pool[i].next = (i == n - 1) ? NIL : i + 1;
    }
    // Time only the sort itself, not list construction.
    timeval t1, t2;
    gettimeofday(&t1, nullptr);
    SortResult final_res = parallel_list_sort(pool.data(), 0, 0, n, threshold);
    gettimeofday(&t2, nullptr);
    double elapsed = (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) * 1e-6;
    // STAT line goes to stderr so it is separable from the stdout event log.
    std::cerr << "STAT: threads=" << max_threads << " size=" << n << " threshold=" << threshold << " time=" << elapsed << std::endl;
    return 0;
}