refresh 2 graphs

Makefile  +13 -50
@@ -1,59 +1,22 @@
CXX = g++
CXXFLAGS = -O2 -std=c++17 -pthread

CXXFLAGS = -O3 -std=c++17 -pthread
TARGET = lab2
SRC = main.cpp

PY = python3
EXPORTER = exporter.py

# ================= BUILD =================
all: $(TARGET)

$(TARGET): $(SRC)
	$(CXX) $(CXXFLAGS) $< -o $@
$(TARGET): main.cpp
	$(CXX) $(CXXFLAGS) main.cpp -o $(TARGET)

# ================= BATCH 1 =================
# FIXED THREAD COUNT = 2
# VARYING WORKLOAD SIZE
batch_size: $(TARGET)
	@echo "=== BATCH 1: scaling by size (threads = 2) ==="
	./$(TARGET) 5000 2 > log_size_5k.txt
	./$(TARGET) 10000 2 > log_size_10k.txt
	./$(TARGET) 20000 2 > log_size_20k.txt
	./$(TARGET) 50000 2 > log_size_50k.txt
	./$(TARGET) 100000 2 > log_size_100k.txt
run_all: $(TARGET)
	@mkdir -p out/timelines
	@echo "1. Generating Timelines (N=1000, Threshold=10)..."
	./$(TARGET) 1000 2 10 > out/timelines/log_t2.txt
	./$(TARGET) 1000 4 10 > out/timelines/log_t4.txt
	python3 exporter.py out/timelines/log_t2.txt out/timelines/
	python3 exporter.py out/timelines/log_t4.txt out/timelines/

# ================= BATCH 2 =================
# FIXED LARGE WORKLOAD
# VARYING THREAD COUNT
batch_threads: $(TARGET)
	@echo "=== BATCH 2: scaling by threads (size = 50000) ==="
	./$(TARGET) 50000 0 > log_thr_0.txt
	./$(TARGET) 50000 2 > log_thr_2.txt
	./$(TARGET) 50000 4 > log_thr_4.txt
	./$(TARGET) 50000 8 > log_thr_8.txt
	@echo "2. Generating Efficiency Benchmark..."
	python3 benchmark.py

# ================= FULL BENCH =================
logs: batch_size batch_threads

# ================= ANALYSIS =================
analyze_size: batch_size
	$(PY) $(EXPORTER) log_size_5k.txt out/size/
	$(PY) $(EXPORTER) log_size_10k.txt out/size/
	$(PY) $(EXPORTER) log_size_20k.txt out/size/
	$(PY) $(EXPORTER) log_size_50k.txt out/size/
	$(PY) $(EXPORTER) log_size_100k.txt out/size/

analyze_threads: batch_threads
	$(PY) $(EXPORTER) log_thr_0.txt out/threads/
	$(PY) $(EXPORTER) log_thr_2.txt out/threads/
	$(PY) $(EXPORTER) log_thr_4.txt out/threads/
	$(PY) $(EXPORTER) log_thr_8.txt out/threads/

# ================= FULL ANALYZE =================
analyze: logs analyze_size analyze_threads

# ================= CLEAN =================
clean:
	rm -f $(TARGET) *.txt timeline.png
	rm -rf $(TARGET) *.txt out/
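
Side note (not part of the commit): a minimal Python sketch of the same pipeline the run_all recipe above drives, for running it without make. Paths and arguments mirror the recipe; everything else (the script name, the final benchmark step) is illustrative only.

# run_all_sketch.py -- hypothetical helper mirroring the `make run_all` steps above
import os
import subprocess

os.makedirs("out/timelines", exist_ok=True)
for threads in (2, 4):
    log_path = f"out/timelines/log_t{threads}.txt"
    # ./lab2 <N> <threads> <threshold> with N=1000, threshold=10, as in the recipe
    with open(log_path, "w") as log:
        subprocess.run(["./lab2", "1000", str(threads), "10"], stdout=log, check=True)
    subprocess.run(["python3", "exporter.py", log_path, "out/timelines/"], check=True)
# efficiency benchmark, as in the "2. Generating Efficiency Benchmark..." step
subprocess.run(["python3", "benchmark.py"], check=True)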
benchmark.py (new file)
@@ -0,0 +1,68 @@
import os
import subprocess

import matplotlib.pyplot as plt


def run_test(n, threads, threshold):
    cmd = f"./lab2 {n} {threads} {threshold}"
    process = subprocess.Popen(
        cmd.split(), stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, text=True
    )
    _, err = process.communicate()
    for line in err.split("\n"):
        if "STAT:" in line:
            return float(line.split("time=")[1])
    return 0


def build_benchmark():
    n_size = 500000
    threshold = 10000
    thread_counts = [0, 1, 2, 4, 8, 12, 16]
    times = []

    print(f"Running benchmark (N={n_size}, threshold={threshold})...")
    for t in thread_counts:
        t_exec = run_test(n_size, t, threshold)
        times.append(t_exec)
        print(f"Threads: {t} | Time: {t_exec:.4f}s")

    plt.figure(figsize=(12, 5))

    # Execution-time plot
    plt.subplot(1, 2, 1)
    plt.plot(thread_counts, times, "o-", color="blue", label="Measured time")
    plt.xlabel("Number of threads (0 = sequential)")
    plt.ylabel("Execution time (s)")
    plt.title("Execution time vs. number of threads")
    plt.grid(True)
    plt.legend()

    # Speedup plot
    plt.subplot(1, 2, 2)
    t_seq = times[0]
    speedup = [t_seq / x if x > 0 else 1 for x in times]
    plt.plot(thread_counts, speedup, "s-", color="green", label="Speedup (S)")
    plt.plot(
        thread_counts,
        [x if x > 0 else 1 for x in thread_counts],
        "--",
        color="red",
        alpha=0.5,
        label="Ideal",
    )
    plt.xlabel("Number of threads")
    plt.ylabel("S = T(sequential) / T(parallel)")
    plt.title("Scalability plot (speedup)")
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig("out/performance_results.png")
    print("Plots saved to out/performance_results.png")


if __name__ == "__main__":
    os.makedirs("out", exist_ok=True)
    build_benchmark()
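
Side note (not part of the commit): run_test() above relies on the "STAT: ... time=<seconds>" line that lab2 writes to stderr (see main.cpp below). A minimal sketch, assuming that exact format, of a slightly more defensive parse plus the speedup/efficiency arithmetic behind the second subplot (S = T_seq / T_par, E = S / p); parse_stat and speedup_and_efficiency are illustrative names, not part of the repository.

import re

STAT_RE = re.compile(r"STAT:.*\btime=([\d.]+)")  # assumed to match the line emitted by main.cpp

def parse_stat(stderr_text: str) -> float:
    """Return the reported wall-clock time, or 0.0 if no STAT line is found."""
    m = STAT_RE.search(stderr_text)
    return float(m.group(1)) if m else 0.0

def speedup_and_efficiency(t_seq: float, t_par: float, workers: int):
    """S = T_seq / T_par, E = S / p (p = number of worker threads)."""
    s = t_seq / t_par if t_par > 0 else 1.0
    return s, s / max(workers, 1)

if __name__ == "__main__":
    sample = "STAT: threads=4 size=500000 threshold=10000 time=0.8123"
    t_par = parse_stat(sample)
    print(speedup_and_efficiency(2.95, t_par, 4))  # roughly (3.63, 0.91)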
exporter.py  +16 -86
@@ -6,112 +6,56 @@ from collections import defaultdict
import matplotlib

try:
    matplotlib.use("QtAgg")
except Exception:
    matplotlib.use("Agg")

except Exception:
    pass
import matplotlib.pyplot as plt

# ================= INPUT =================
if len(sys.argv) < 2:
    print("Usage: python analyze_log.py <logfile> [output_dir]")
    print("Usage: python exporter.py <logfile> [output_dir]")
    sys.exit(1)

logfile = sys.argv[1]
out_dir = sys.argv[2] if len(sys.argv) >= 3 else "out"

# ================= PATH LOGIC =================
base_name = os.path.splitext(os.path.basename(logfile))[0]

# normalize folder structure
out_dir = os.path.normpath(out_dir)

pics_dir = os.path.join(out_dir, "pics")
tables_dir = os.path.join(out_dir, "tables")

os.makedirs(pics_dir, exist_ok=True)
os.makedirs(tables_dir, exist_ok=True)

output_png = os.path.join(pics_dir, f"{base_name}.png")
output_md = os.path.join(tables_dir, f"{base_name}.md")


# ================= PARSE =================
pattern = re.compile(r"(START|END).*TID=(\d+).*range=\[(\d+),(\d+)\].*time=([\d.]+)")

events = defaultdict(dict)

with open(logfile) as f:
    for line in f:
        m = pattern.search(line)
        if not m:
            continue
        if m:
            typ, tid, l, r, t = m.groups()
            key = (tid, int(l), int(r))
            events[key][typ] = float(t)

        typ, tid, l, r, t = m.groups()
        key = (tid, int(l), int(r))
        events[key][typ] = float(t)


# ================= BUILD ROWS =================
rows = []

for (tid, l, r), v in events.items():
    if "START" in v and "END" in v:
        start = v["START"]
        end = v["END"]
        duration = end - start

        rows.append(
            {
                "tid": tid,
                "range": f"[{l},{r}]",
                "start": start,
                "end": end,
                "duration": duration,
                "start": v["START"],
                "end": v["END"],
                "duration": v["END"] - v["START"],
            }
        )

rows.sort(key=lambda x: x["start"])

if not rows:
    print("No valid events found")
    sys.exit(1)


# ================= OFFSET =================
rows.sort(key=lambda x: x["start"])
t0 = rows[0]["start"]
for r in rows:
    r["offset"] = r["start"] - t0


# ================= COLOR MAP (LOGICAL) =================
unique_tids = sorted(set(r["tid"] for r in rows))
color_map = {tid: plt.cm.tab20(i % 20) for i, tid in enumerate(unique_tids)}

color_map = {
    tid: plt.cm.tab20(i % 20)  # stable, readable palette
    for i, tid in enumerate(unique_tids)
}


# ================= SAVE MARKDOWN =================
with open(output_md, "w") as f:
    f.write("# Execution Table\n\n")
    f.write("| TID | Range | Start | End | Duration | Offset |\n")
    f.write("|-----|-------|-------|-----|----------|--------|\n")

    for r in rows:
        f.write(
            f"| {r['tid']} | {r['range']} | "
            f"{r['start']:.6f} | {r['end']:.6f} | "
            f"{r['duration']:.6f} | {r['offset']:.6f} |\n"
        )

print(f"[OK] Table saved: {output_md}")


# ================= PLOT =================
plt.figure(figsize=(12, 6))

for i, r in enumerate(rows):
    plt.plot(
        [r["offset"], r["offset"] + r["duration"]],
@@ -120,23 +64,9 @@ for i, r in enumerate(rows):
        linewidth=4,
    )

# legend (TID → color)
for tid in unique_tids:
    plt.plot([], [], color=color_map[tid], label=f"TID {tid}")

plt.legend(loc="upper right", fontsize=8)

plt.xlabel("Time (seconds from start)")
plt.ylabel("Tasks")
plt.title(f"Execution Timeline: {base_name}")
plt.xlabel("Time (sec from start)")
plt.ylabel("Tasks (recursive calls)")
plt.title(f"Execution timeline: {base_name}")
plt.grid(True)
plt.tight_layout()


# ================= SAVE IMAGE =================
plt.savefig(output_png, dpi=200)
print(f"[OK] Plot saved: {output_png}")


# ================= SHOW =================
# plt.show()
plt.savefig(os.path.join(pics_dir, f"{base_name}.png"))
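
Side note (not part of the commit): the parser above keys each (TID, range) pair by its START/END timestamps. A minimal self-contained check of the regex against the kind of lines log_event() in main.cpp writes; the TID and timestamps are invented for the example.

import re
from collections import defaultdict

pattern = re.compile(r"(START|END).*TID=(\d+).*range=\[(\d+),(\d+)\].*time=([\d.]+)")

sample_log = [
    "START TID=139731 depth=0 range=[0,0] time=1700000000.000123",
    "END TID=139731 depth=0 range=[0,0] time=1700000000.004567",
]

events = defaultdict(dict)
for line in sample_log:
    m = pattern.search(line)
    if not m:
        continue
    typ, tid, l, r, t = m.groups()
    events[(tid, int(l), int(r))][typ] = float(t)

for (tid, l, r), v in events.items():
    print(tid, (l, r), f"duration={v['END'] - v['START']:.6f}s")  # ~0.004444s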
main.cpp  +41 -31
@@ -8,7 +8,6 @@
#include <unistd.h>
#include <sstream>

// ================= STRUCTURES =================
constexpr int NIL = -1;

struct Node {
@@ -25,36 +24,34 @@ struct Args {
    Node* pool;
    int head;
    int depth;
    int size;
    int threshold;
    SortResult* res_out;
};

// ================= GLOBAL VARIABLES =================
int active_threads = 0;
int max_threads = 4;
pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
bool enable_logging = false;

// High-resolution logging for exporter.py
void log_event(const std::string& type, int head, int depth) {
    if (!enable_logging) return;
    pthread_mutex_lock(&log_mutex);

// ================= UTILITIES =================
std::string now() {
    timeval tv{};
    gettimeofday(&tv, nullptr);
    std::ostringstream oss;
    oss << tv.tv_sec << "." << std::setfill('0') << std::setw(6) << tv.tv_usec;
    return oss.str();
}
    double current_time = tv.tv_sec + tv.tv_usec * 1e-6;

// Log format is adapted to the analyze_log.py script
void log_event(const std::string& type, int head, int depth) {
    pthread_mutex_lock(&log_mutex);
    // Use head as the range identifier for the parser
    std::cout << type << " TID=" << pthread_self()
              << " depth=" << depth
              << " range=[" << head << "," << head << "]"
              << " time=" << now() << std::endl;
              << " time=" << std::fixed << std::setprecision(6) << current_time << std::endl;

    pthread_mutex_unlock(&log_mutex);
}

// ================= LIST LOGIC =================
void split_list(Node* pool, int head, int& left, int& right) {
    if (head == NIL || pool[head].next == NIL) {
        left = head; right = NIL; return;
@@ -88,16 +85,15 @@ SortResult merge_lists(Node* pool, int l, int r) {
    return {res_head, comps};
}

// ================= RECURSION =================
SortResult parallel_list_sort(Node* pool, int head, int depth);
SortResult parallel_list_sort(Node* pool, int head, int depth, int size, int threshold);

void* thread_func(void* arg) {
    Args* a = (Args*)arg;
    *(a->res_out) = parallel_list_sort(a->pool, a->head, a->depth);
    *(a->res_out) = parallel_list_sort(a->pool, a->head, a->depth, a->size, a->threshold);
    return nullptr;
}

SortResult parallel_list_sort(Node* pool, int head, int depth) {
SortResult parallel_list_sort(Node* pool, int head, int depth, int size, int threshold) {
    log_event("START", head, depth);
    if (head == NIL || pool[head].next == NIL) {
        log_event("END", head, depth);
@@ -106,23 +102,25 @@ SortResult parallel_list_sort(Node* pool, int head, int depth) {

    int left_p, right_p;
    split_list(pool, head, left_p, right_p);
    int new_size = size / 2;

    pthread_t tid;
    bool spawned = false;
    SortResult res_right = {NIL, 0};

    pthread_mutex_lock(&counter_mutex);
    if (active_threads < max_threads) {
        active_threads++;
        spawned = true;
    if (size > threshold) {
        pthread_mutex_lock(&counter_mutex);
        if (active_threads < max_threads) {
            active_threads++;
            spawned = true;
        }
        pthread_mutex_unlock(&counter_mutex);
    }
    pthread_mutex_unlock(&counter_mutex);

    if (spawned) {
        Args* args = new Args{pool, right_p, depth + 1, &res_right};
        Args* args = new Args{pool, right_p, depth + 1, new_size, threshold, &res_right};
        pthread_create(&tid, nullptr, thread_func, args);

        SortResult res_left = parallel_list_sort(pool, left_p, depth + 1);
        SortResult res_left = parallel_list_sort(pool, left_p, depth + 1, new_size, threshold);
        pthread_join(tid, nullptr);

        pthread_mutex_lock(&counter_mutex);
@@ -134,8 +132,8 @@ SortResult parallel_list_sort(Node* pool, int head, int depth) {
        log_event("END", m.head, depth);
        return {m.head, res_left.comparisons + res_right.comparisons + m.comparisons};
    } else {
        SortResult res_left = parallel_list_sort(pool, left_p, depth + 1);
        SortResult res_r_seq = parallel_list_sort(pool, right_p, depth + 1);
        SortResult res_left = parallel_list_sort(pool, left_p, depth + 1, new_size, threshold);
        SortResult res_r_seq = parallel_list_sort(pool, right_p, depth + 1, new_size, threshold);
        SortResult m = merge_lists(pool, res_left.head, res_r_seq.head);
        log_event("END", m.head, depth);
        return {m.head, res_left.comparisons + res_r_seq.comparisons + m.comparisons};
@@ -143,17 +141,29 @@ SortResult parallel_list_sort(Node* pool, int head, int depth) {
}

int main(int argc, char* argv[]) {
    int n = 10000;
    int n = 100000;
    int threshold = 5000;
    if (argc >= 2) n = std::atoi(argv[1]);
    if (argc >= 3) max_threads = std::atoi(argv[2]);
    if (argc >= 4) threshold = std::atoi(argv[3]);

    // Enable logging only for small N
    if (n < 5000) enable_logging = true;

    std::vector<Node> pool(n);
    std::mt19937 rng(time(0));
    std::mt19937 rng(1337);
    for (int i = 0; i < n; i++) {
        pool[i].value = rng() % 100000;
        pool[i].next = (i == n - 1) ? NIL : i + 1;
    }

    parallel_list_sort(pool.data(), 0, 0);
    timeval t1, t2;
    gettimeofday(&t1, nullptr);
    SortResult final_res = parallel_list_sort(pool.data(), 0, 0, n, threshold);
    gettimeofday(&t2, nullptr);

    double elapsed = (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) * 1e-6;
    std::cerr << "STAT: threads=" << max_threads << " size=" << n << " threshold=" << threshold << " time=" << elapsed << std::endl;

    return 0;
}
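
Side note (not part of the commit): a small Python model of the spawn decision parallel_list_sort now makes -- a new worker thread is created only while the sublist is larger than `threshold` and the global budget of max_threads is not exhausted. The names here are illustrative, not taken from the C++ sources.

import threading

active_threads = 0
max_threads = 4
counter_lock = threading.Lock()  # plays the role of counter_mutex

def try_spawn(size: int, threshold: int) -> bool:
    """Return True if this recursion level may sort its right half in a new thread."""
    global active_threads
    if size <= threshold:        # small sublist: stay sequential, avoid thread overhead
        return False
    with counter_lock:
        if active_threads < max_threads:
            active_threads += 1
            return True
    return False

def release_slot():
    """Counterpart of returning a slot to the budget after the worker is joined."""
    global active_threads
    with counter_lock:
        active_threads -= 1

print(try_spawn(100_000, 10_000))  # True: large sublist and budget available
print(try_spawn(2_000, 10_000))    # False: below the threshold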

+399998  File diff suppressed because it is too large