import json
import math
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as statsSC
from scipy import stats

# Collection of functions to evaluate the data of the study
def get_eval_blocks(path: str) -> list:
"""
Erstellen einer Liste von Dictionaries, die Daten der übergebenen Studie enthalten.
Args:
path: Pfad zum Ordner mit den JSON-Dateien
Returns:
data_bundle: Liste von Dictionaries, die die Daten der Studie enthalten
Beispiel:
[
{
"worker": "worker_id",
"data": [
{
"cloudOne": "cloudOne",
"cloudTwo": "cloudTwo",
"controlLetterOne": "controlLetterOne",
"controlLetterTwo": "controlLetterTwo",
"responseOneLetter": "responseOneLetter",
"responseTwoLetter": "responseTwoLetter",
"responseOneQuality": "responseOneQuality",
"responseTwoQuality": "responseTwoQuality",
"responseDifference": "responseDifference",
"positionOne": [x, y],
"positionTwo": [x, y],
"perspectives01": [{x, y, z}],
"perspectives02": [{x, y, z}],
"total_time_1": time,
"total_time_2": time,
"tabswitch": int
}
],
"wrong_elements": int,
"blurs": int
}
"""
files = os.listdir(path)
workers = []
data_bundle: list = []
    for file_name in files:
        file_path = os.path.join(path, file_name)
        # Open and load the JSON file
        with open(file_path, 'r', encoding='utf-8') as file:
            data = json.load(file)
blocks = []
for item in data:
if item.get('type') is not None:
if item.get('type') != 'trial':
blocks.append(item)
study_run = {
"worker": data[len(data)-1].get('worker'),
"data": [],
"wrong_elements": 0,
"blurs": 0,
"total_time": 0,
}
if study_run["worker"] in workers:
continue
else:
workers.append(study_run["worker"])
start_time: float = 0
end_time: float = 0
for i in range(0, len(blocks), 4):
eval_obj = {
"cloudOne": blocks[i].get('cloudOne'),
"cloudTwo": blocks[i].get('cloudTwo'),
"controlLetterOne": blocks[i].get('controlLetter1'),
"controlLetterTwo": blocks[i].get('controlLetter2'),
"responseOneLetter": json.loads(blocks[i+1].get('responses'))["Q1"],
"responseTwoLetter": json.loads(blocks[i+3].get('responses'))["Q2"],
"responseOneQuality": json.loads(blocks[i+1].get('responses'))["Q0"],
"responseTwoQuality": json.loads(blocks[i+3].get('responses'))["Q0"],
"responseDifference": json.loads(blocks[i+3].get('responses'))["Q1"],
"positionOne": blocks[i].get('positions')[0],
"positionTwo": blocks[i].get('positions')[1],
"perspectives01": blocks[i].get('perspectives'),
"perspectives02": blocks[i+2].get('perspectives'),
"total_time_1": blocks[i+1].get('time_elapsed') - blocks[i].get('time_elapsed'),
"total_time_2": blocks[i+3].get('time_elapsed') - blocks[i+2].get('time_elapsed'),
"tabswitch": blocks[i].get('tab_switch') + blocks[i].get('tab_switch'),
}
if i == 0:
start_time = blocks[i].get('time_elapsed')
if i == len(blocks)-4:
end_time = blocks[i+3].get('time_elapsed')
study_run["data"].append(eval_obj)
study_run["total_time"] = end_time - start_time
data_bundle.append(study_run)
for i in data_bundle:
for j in i["data"]:
if j["controlLetterOne"] != j["responseOneLetter"]:
i["wrong_elements"] += 1
if j["controlLetterTwo"] != j["responseTwoLetter"]:
i["wrong_elements"] += 1
i["blurs"] += j["tabswitch"]
return data_bundle
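# A minimal usage sketch (the folder path "results/one_element" is a
# hypothetical example, not a path defined by this module):
#
#     blocks = get_eval_blocks("results/one_element")
#     for run in blocks:
#         print(run["worker"], run["wrong_elements"], run["blurs"], run["total_time"])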
def get_eval_blocks_2_elements(path: str) -> list:
"""
    Same as get_eval_blocks, but for 2 elements
"""
files = os.listdir(path)
workers = []
data_bundle: list = []
    for file_name in files:
        file_path = os.path.join(path, file_name)
        # Open and load the JSON file
        with open(file_path, 'r', encoding='utf-8') as file:
            data = json.load(file)
blocks = []
for item in data:
if item.get('type') is not None:
if item.get('type') != 'trial':
blocks.append(item)
study_run = {
"worker": data[len(data)-1].get('worker'),
"data": [],
"wrong_elements": 0,
"blurs": 0,
"total_time": 0,
}
if study_run["worker"] in workers:
continue
else:
workers.append(study_run["worker"])
start_time: float = 0
end_time: float = 0
for i in range(0, len(blocks), 4):
eval_obj = {
"cloudOne": blocks[i].get('cloudOne'),
"cloudTwo": blocks[i].get('cloudTwo'),
"controlLetterOne": blocks[i].get('controlLetter1'),
"controlLetterTwo": blocks[i].get('controlLetter2'),
"controlLetterThree": blocks[i].get('controlLetter3'),
"controlLetterFour": blocks[i].get('controlLetter4'),
"responseOneLetter": json.loads(blocks[i+1].get('responses'))["Q1"],
"responseTwoLetter": json.loads(blocks[i+1].get('responses'))["Q2"],
"responseThreeLetter": json.loads(blocks[i+3].get('responses'))["Q2"],
"responseFourLetter": json.loads(blocks[i+3].get('responses'))["Q3"],
"responseOneQuality": json.loads(blocks[i+1].get('responses'))["Q0"],
"responseTwoQuality": json.loads(blocks[i+3].get('responses'))["Q0"],
"responseDifference": json.loads(blocks[i+3].get('responses'))["Q1"],
"positionOne": blocks[i].get('positions')[0],
"positionTwo": blocks[i].get('positions')[1],
"positionThree": blocks[i].get('positions')[2],
"positionFour": blocks[i].get('positions')[3],
"perspectives01": blocks[i].get('perspectives'),
"perspectives02": blocks[i+2].get('perspectives'),
"total_time_1": blocks[i+1].get('time_elapsed') - blocks[i].get('time_elapsed'),
"total_time_2": blocks[i+3].get('time_elapsed') - blocks[i+2].get('time_elapsed'),
"tabswitch": blocks[i].get('tab_switch') + blocks[i].get('tab_switch'),
}
if i == 0:
start_time = blocks[i].get('time_elapsed')
if i == len(blocks)-4:
end_time = blocks[i+3].get('time_elapsed')
study_run["data"].append(eval_obj)
study_run["total_time"] = end_time - start_time
data_bundle.append(study_run)
for i in data_bundle:
for j in i["data"]:
if j["controlLetterOne"] != j["responseOneLetter"] or j["controlLetterTwo"] != j["responseTwoLetter"]:
i["wrong_elements"] += 1
if j["controlLetterThree"] != j["responseThreeLetter"] or j["controlLetterFour"] != j["responseFourLetter"]:
i["wrong_elements"] += 1
i["blurs"] += j["tabswitch"]
return data_bundle
def get_eval_blocks_3_elements(path: str) -> list:
"""
    Same as get_eval_blocks, but for 3 elements
"""
files = os.listdir(path)
    workers = []
data_bundle: list = []
    for file_name in files:
        file_path = os.path.join(path, file_name)
        # Open and load the JSON file
        with open(file_path, 'r', encoding='utf-8') as file:
            data = json.load(file)
blocks = []
for item in data:
if item.get('type') is not None:
if item.get('type') != 'trial':
blocks.append(item)
study_run = {
"worker": data[len(data)-1].get('worker'),
"data": [],
"wrong_elements": 0,
"blurs": 0,
"total_time": 0,
}
if study_run["worker"] in workers:
continue
else:
workers.append(study_run["worker"])
start_time: float = 0
end_time: float = 0
for i in range(0, len(blocks), 4):
eval_obj = {
"cloudOne": blocks[i].get('cloudOne'),
"cloudTwo": blocks[i].get('cloudTwo'),
"controlLetterOne": blocks[i].get('controlLetter1'),
"controlLetterTwo": blocks[i].get('controlLetter2'),
"controlLetterThree": blocks[i].get('controlLetter3'),
"controlLetterFour": blocks[i].get('controlLetter4'),
"controlLetterFive": blocks[i].get('controlLetter5'),
"controlLetterSix": blocks[i].get('controlLetter6'),
"responseOneLetter": json.loads(blocks[i+1].get('responses'))["Q1"],
"responseTwoLetter": json.loads(blocks[i+1].get('responses'))["Q2"],
"responseThreeLetter": json.loads(blocks[i+1].get('responses'))["Q3"],
"responseFourLetter": json.loads(blocks[i+3].get('responses'))["Q2"],
"responseFiveLetter": json.loads(blocks[i+3].get('responses'))["Q3"],
"responseSixLetter": json.loads(blocks[i+3].get('responses'))["Q4"],
"responseOneQuality": json.loads(blocks[i+1].get('responses'))["Q0"],
"responseTwoQuality": json.loads(blocks[i+3].get('responses'))["Q0"],
"responseDifference": json.loads(blocks[i+3].get('responses'))["Q1"],
"positionOne": blocks[i].get('positions')[0],
"positionTwo": blocks[i].get('positions')[1],
"positionThree": blocks[i].get('positions')[2],
"positionFour": blocks[i].get('positions')[3],
"positionFive": blocks[i].get('positions')[4],
"positionSix": blocks[i].get('positions')[5],
"perspectives01": blocks[i].get('perspectives'),
"perspectives02": blocks[i+2].get('perspectives'),
"total_time_1": blocks[i+1].get('time_elapsed') - blocks[i].get('time_elapsed'),
"total_time_2": blocks[i+3].get('time_elapsed') - blocks[i+2].get('time_elapsed'),
"tabswitch": blocks[i].get('tab_switch') + blocks[i].get('tab_switch'),
}
if i == 0:
start_time = blocks[i].get('time_elapsed')
if i == len(blocks)-4:
end_time = blocks[i+3].get('time_elapsed')
study_run["data"].append(eval_obj)
study_run["total_time"] = end_time - start_time
data_bundle.append(study_run)
for i in data_bundle:
for j in i["data"]:
if j["controlLetterOne"] != j["responseOneLetter"] or j["controlLetterTwo"] != j["responseTwoLetter"] or j["controlLetterThree"] != j["responseThreeLetter"]:
i["wrong_elements"] += 1
if j["controlLetterFour"] != j["responseFourLetter"] or j["controlLetterFive"] != j["responseFiveLetter"] or j["controlLetterSix"] != j["responseSixLetter"]:
i["wrong_elements"] += 1
i["blurs"] += j["tabswitch"]
return data_bundle
def get_successful_blocks(blocks: list, max_blurs: int, max_wrong_elements: int, only_by_tabs="false") -> list:
"""
Filtert die übergebenen Blöcke nach den übergebenen Parametern und gibt eine Liste der erfolgreichen Blöcke zurück.
Args:
blocks: Liste von Blöcken
max_blurs: Maximale Anzahl von Blurs
max_wrong_elements: Maximale Anzahl von
Returns:
successful_blocks: Liste von erfolgreichen Blöcken
Beispiel:
blocks = [
{
"worker": "worker_id",
"data": [
{
"cloudOne": "cloudOne",
"cloudTwo": "cloudTwo",
"controlLetterOne": "controlLetterOne",
"controlLetterTwo": "controlLetterTwo",
"responseOneLetter": "responseOneLetter",
"responseTwoLetter": "responseTwoLetter",
"responseOneQuality": "responseOneQuality",
"responseTwoQuality": "responseTwoQuality",
"responseDifference": "responseDifference",
"positionOne": [x, y],
"positionTwo": [x, y],
"perspectives01": [{x, y, z}],
"perspectives02": [{x, y, z}],
"total_time_1": time,
"total_time_2": time,
"tabswitch": int
}
],
"wrong_elements": int,
"blurs": int
}
]
"""
successful_blocks: list = []
if only_by_tabs == "false":
for i in blocks:
if i["wrong_elements"] <= max_wrong_elements and i["blurs"] <= max_blurs:
successful_blocks.append(i)
elif only_by_tabs == "true":
for i in blocks:
if i["blurs"] <= max_blurs:
successful_blocks.append(i)
return successful_blocks
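# Example: keep only runs with at most 2 tab switches and at most 3 wrong
# control letters (the thresholds are illustrative, not the study's rules):
#
#     valid = get_successful_blocks(blocks, max_blurs=2, max_wrong_elements=3)
#     tabs_only = get_successful_blocks(blocks, 2, 3, only_by_tabs="true")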
def build_csv_general_user_statistics(blocks: list, max_blurs: int, max_wrong_elements: int, csv_name: str, export_path: str) -> list:
    """
    Creates a CSV file with general statistics about the given blocks.
    Args:
        blocks: List of blocks
        max_blurs: Maximum number of blurs
        max_wrong_elements: Maximum number of wrong elements
        csv_name: Name of the CSV file
        export_path: Folder the CSV file is written to
    Returns:
        data: List with general statistics
    CSV file format:
        number_of_users, number_of_successful_by_rules, number_of_successful_users, success_rate, avg_time
    """
successful_blocks = get_successful_blocks(blocks, max_blurs, max_wrong_elements)
succ_element_count = 0
for i in blocks:
if i["blurs"] <= max_blurs:
succ_element_count += 1
    data = {
        "number_of_users": len(blocks),
        "number_of_successful_by_rules": succ_element_count,
        "number_of_successful_users": len(successful_blocks),
        "success_rate": len(successful_blocks) / succ_element_count if succ_element_count else 0,
        "avg_time": 0,
    }
    for i in successful_blocks:
        data["avg_time"] += i["total_time"]
    if successful_blocks:
        # Convert the accumulated time from milliseconds to minutes
        data["avg_time"] = data["avg_time"] / len(successful_blocks) / 1000 / 60
df = pd.DataFrame([data])
df.to_csv(export_path+"/"+csv_name+".csv", index=False)
return [data]
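# The resulting CSV holds a single row; an illustrative example (made-up
# numbers):
#
#     number_of_users,number_of_successful_by_rules,number_of_successful_users,success_rate,avg_time
#     40,36,30,0.83,7.5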
def build_csv_user_statistics(blocks: list, max_blurs: int, max_wrong_elements: int, csv_name: str, export_path: str) -> list:
    """
    Creates a CSV file with statistics about the given blocks.
    A block contains the data of one user of the study.
    Args:
        blocks: List of blocks / study runs
        max_blurs: Maximum number of blurs
        max_wrong_elements: Maximum number of wrong elements
        csv_name: Name of the CSV file to export
        export_path: Folder the CSV file is written to
    Returns:
        data: List with statistics about the given blocks
    CSV file format:
        worker, number_of_blurs, number_of_wrong_elements, is_successful, comment
    """
data:list = []
for i in blocks:
user_data = {
"worker": i["worker"],
"number_of_blurs": i["blurs"],
"number_of_wrong_elements": i["wrong_elements"],
"is_successful": False,
"comment": "",
}
if i["wrong_elements"] <= max_wrong_elements and i["blurs"] <= max_blurs:
user_data["is_successful"] = True
if i["wrong_elements"] > max_wrong_elements and i["blurs"] > max_blurs:
user_data["comment"] = "Too many wrong elements and blurs"
elif i["wrong_elements"] > max_wrong_elements:
user_data["comment"] = "Too many wrong elements"
elif i["blurs"] > max_blurs:
user_data["comment"] = "Too many blurs"
data.append(user_data)
df = pd.DataFrame(data)
df.to_csv(export_path+"/"+csv_name+".csv", index=False)
return data
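# An illustrative row of the per-user CSV (the worker id is a placeholder):
#
#     worker,number_of_blurs,number_of_wrong_elements,is_successful,comment
#     A1B2C3,1,0,True,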
def get_bremen_blocks(blocks: list) -> list:
"""
Filtert die übergebenen Blöcke nach den Bremen-Blöcken und gibt eine Liste der Bremen-Blöcke zurück.
Args:
blocks: Liste von Blöcken
Beispiel:
blocks = [
{
"worker": "worker_id",
"data": [
{
"cloudOne": "cloudOne",
"cloudTwo": "cloudTwo",
"controlLetterOne": "controlLetterOne",
"controlLetterTwo": "controlLetterTwo",
"responseOneLetter": "responseOneLetter",
"responseTwoLetter": "responseTwoLetter",
"responseOneQuality": "responseOneQuality",
"responseTwoQuality": "responseTwoQuality",
"responseDifference": "responseDifference",
"positionOne": [x, y],
"positionTwo": [x, y],
"perspectives01": [{x, y, z}],
"perspectives02": [{x, y, z}],
"total_time_1": time,
"total_time_2": time,
"tabswitch": int
}
],
"wrong_elements": int,
"blurs": int
}
]
Returns:
bremen_blocks: Liste der Bremen-Blöcke
Beispiel:
bremen_blocks = [
{
"cloudOne": "cloudOne",
"cloudTwo": "cloudTwo",
"controlLetterOne": "controlLetterOne",
"controlLetterTwo": "controlLetterTwo",
"responseOneLetter": "responseOneLetter",
"responseTwoLetter": "responseTwoLetter",
"responseOneQuality": "responseOneQuality",
"responseTwoQuality": "responseTwoQuality",
"responseDifference": "responseDifference",
"positionOne": [x, y],
"positionTwo": [x, y],
"perspectives01": [{x, y, z}],
"perspectives02": [{x, y, z}],
"total_time_1": time,
"total_time_2": time,
"tabswitch": int
}
]
"""
bremen_blocks = []
for i in blocks:
for j in i["data"]:
            # Bremen clouds always have a 0 after the Q, e.g. Q00 ("original"), Q01 ("V4"), Q02 ("V8")
if j["cloudOne"][1] == "0":
bremen_blocks.append(j)
return bremen_blocks
def get_randersacker_blocks(blocks: list) -> list:
"""
Filtert die übergebenen Blöcke nach den Randersacker-Blöcken und gibt eine Liste der Randersacker-Blöcke zurück.
Args:
blocks: Liste von Blöcken
Beispiel:
blocks = [
{
"worker": "worker_id",
"data": [
{
"cloudOne": "cloudOne",
"cloudTwo": "cloudTwo",
"controlLetterOne": "controlLetterOne",
"controlLetterTwo": "controlLetterTwo",
"responseOneLetter": "responseOneLetter",
"responseTwoLetter": "responseTwoLetter",
"responseOneQuality": "responseOneQuality",
"responseTwoQuality": "responseTwoQuality",
"responseDifference": "responseDifference",
"positionOne": [x, y],
"positionTwo": [x, y],
"perspectives01": [{x, y, z}],
"perspectives02": [{x, y, z}],
"total_time_1": time,
"total_time_2": time,
"tabswitch": int
}
],
"wrong_elements": int,
"blurs": int
}
]
Returns:
randersacker_blocks: Liste der Randersacker-Blöcke
Beispiel:
randersacker_blocks = [
{
"cloudOne": "cloudOne",
"cloudTwo": "cloudTwo",
"controlLetterOne": "controlLetterOne",
"controlLetterTwo": "controlLetterTwo",
"responseOneLetter": "responseOneLetter",
"responseTwoLetter": "responseTwoLetter",
"responseOneQuality": "responseOneQuality",
"responseTwoQuality": "responseTwoQuality",
"responseDifference": "responseDifference",
"positionOne": [x, y],
"positionTwo": [x, y],
"perspectives01": [{x, y, z}],
"perspectives02": [{x, y, z}],
"total_time_1": time,
"total_time_2": time,
"tabswitch": int
}
]
"""
randersacker_blocks = []
for i in blocks:
for j in i["data"]:
            # Randersacker clouds always have a 1 after the Q, e.g. Q10 ("original"), Q11 ("V4"), Q12 ("V8")
if j["cloudOne"][1] == "1":
randersacker_blocks.append(j)
return randersacker_blocks
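# The cloud IDs encode the stimulus and the reduction variant in fixed
# character positions: index 1 selects the stimulus (0 = Bremen,
# 1 = Randersacker) and index 2 the reduction variant (0 = original,
# 1-6 = reduced versions). A small check, assuming an ID such as "Q12":
#
#     cloud_id = "Q12"
#     assert cloud_id[1] == "1"  # Randersacker
#     assert cloud_id[2] == "2"  # reduction variant 2 ("V8")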
def plot_multiple_heatmaps_bremen(data_list, positions, export_path, name):
"""
plotet die Heatmaps für die verschiedenen Positionen in Bremen und bündelt sie in einem Bild
"""
# Create an empty array of length len(positions)
data = [[[], []] for _ in range(len(positions))]
# Insert data into the array
for item in data_list:
for j in range(len(positions)):
if item["positionOne"] == positions[j]:
for k in item["perspectives01"][0]:
data[j][0].append(k["x"])
data[j][1].append(k["y"])
if item["positionTwo"] == positions[j]:
for k in item["perspectives02"][0]:
data[j][0].append(k["x"])
data[j][1].append(k["y"])
# Set up a 4x2 grid of subplots for 8 heatmaps (2 per row)
fig, axs = plt.subplots(4, 2, figsize=(18, 24),
gridspec_kw={'height_ratios': [1, 1, 1, 1], 'width_ratios': [1, 1], 'hspace': 0.3, 'wspace': 0.2})
# Add an axis for the colorbar on top
cax = fig.add_axes([0.1, 0.92, 0.8, 0.02])
# Load the background image
img = plt.imread('maps/bremen_heat_back.png') # Replace with the path to your actual image
extent = [-153.2, 76.5, -79.7, 103.8]
# Function to create and plot heatmap
def create_heatmap(ax, x, y):
ax.imshow(img, extent=extent, aspect='auto', alpha=0.5)
hist, _, _ = np.histogram2d(x, y, bins=20, range=[[-153.2, 76.5], [-79.7, 103.8]])
if hist.sum() != 0:
hist_percentage = (hist / hist.sum()) * 100
else:
hist_percentage = hist
return ax.imshow(hist_percentage.T, extent=extent, origin='lower', cmap='hot', alpha=0.6)
# Create heatmaps for each position
ims = []
for i, ax in enumerate(axs.flat):
ims.append(create_heatmap(ax, data[i][0], data[i][1]))
ax.set_xlabel('X Coordinate', fontsize=24)
ax.set_ylabel('Y Coordinate', fontsize=24)
ax.set_xlim(-153.2, 76.5)
ax.set_ylim(-79.7, 103.8)
ax.set_title(f'Position: {i+1}', fontsize=24)
ax.tick_params(axis='both', which='major', labelsize=24)
ax.plot(positions[i][0], positions[i][1], 'gx', markersize=20, markeredgewidth=5)
# Find the global maximum percentage for consistent color scaling
vmax = max(im.get_array().max() for im in ims)
# Update color scaling for all heatmaps
for im in ims:
im.set_clim(0, vmax)
# Add colorbar
cbar = plt.colorbar(ims[0], cax=cax, orientation='horizontal')
cbar.set_label('Movement Percentage (%)', fontsize=24)
cbar.ax.tick_params(labelsize=24)
# Adjust layout and save
# Manually adjust layout to avoid overlap and make space for the colorbar
plt.subplots_adjust(left=0.1, right=0.9, top=0.88, bottom=0.05, hspace=0.6, wspace=0.3)
plt.savefig(f"{export_path}/{name}.png", dpi=300)
plt.close()
def filter_x_y_from_blocks(blocks):
"""
Filtert die x und y Koordinaten aus den Blöcken und gibt sie als Listen zurück.
Args:
blocks: Liste von Blöcken
Returns:
x: Liste von x-Koordinaten
y: Liste von y-Koordinaten
"""
x,y = [],[]
for i in blocks:
for j in i["perspectives01"][0]:
x.append(j["x"])
y.append(j["y"])
for j in i["perspectives02"][0]:
y.append(j["y"])
x.append(j["x"])
return x,y
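# Typical pipeline: filter one stimulus, flatten the camera perspectives into
# coordinate lists, then plot (the export folder "plots" is illustrative):
#
#     bremen = get_bremen_blocks(valid)
#     x, y = filter_x_y_from_blocks(bremen)
#     plot_heatmap_bremen(x, y, "plots", "bremen_overall")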
def plot_heatmap_bremen(x, y, export_path, name):
    """
    Plots a single movement heatmap over the Bremen background image.
    """
fig, ax = plt.subplots(figsize=(16, 14))
# Load and add the background image
img = plt.imread('maps/bremen_heat_back.png') # Replace with path to your actual image
ax.imshow(img, extent=[-153.2, 76.5, -79.7, 103.8], aspect='auto', alpha=0.8)
# Create a 2D histogram
hist, xedges, yedges = np.histogram2d(x, y, bins=20, range=[[-153.2, 76.5], [-79.7, 103.8]])
# Create a heatmap
heatmap = ax.imshow(hist.T, extent=[-153.2, 76.5, -79.7, 103.8], origin='lower', cmap='hot', alpha=0.65)
# Add a colorbar
cbar = plt.colorbar(heatmap, ax=ax)
cbar.set_label('Density')
ax.set_title('Heatmap of Bremen')
ax.set_xlabel('X Coordinate')
ax.set_ylabel('Y Coordinate')
# Set the axis limits to match the image extent
ax.set_xlim(-153.2, 76.5)
ax.set_ylim(-79.7, 103.8)
# Add grid lines
ax.grid(True, linestyle='--', alpha=0.7)
    plt.savefig(export_path+"/"+name+".png", dpi=300)
    plt.close()
def plot_multiple_heatmaps_rander(data_list, positions, export_path, name):
"""
plotet die Heatmaps für die verschiedenen Positionen in Randersacker und bündelt sie in einem Bild
"""
# Create an empty array of length len(positions)
data = [[[], []] for _ in range(len(positions))]
# Insert data into the array
for item in data_list:
for j in range(len(positions)):
if item["positionOne"] == positions[j]:
for k in item["perspectives01"][0]:
data[j][0].append(k["x"])
data[j][1].append(k["y"])
if item["positionTwo"] == positions[j]:
for k in item["perspectives02"][0]:
data[j][0].append(k["x"])
data[j][1].append(k["y"])
    # Set up a 3x2 grid of subplots for 6 heatmaps (2 per row)
fig, axs = plt.subplots(3, 2, figsize=(18, 24),
gridspec_kw={'height_ratios': [1, 1, 1], 'width_ratios': [1, 1], 'hspace': 0.25, 'wspace': 0.1})
# Add an axis for the colorbar on top
cax = fig.add_axes([0.1, 0.92, 0.8, 0.02])
# Load the background image
img = plt.imread('maps/rander_heat_back.png') # Replace with the path to your actual image
extent = [-41.3, 39.2, -54.3, 24.1]
# Function to create and plot heatmap
def create_heatmap(ax, x, y):
ax.imshow(img, extent=extent, aspect='auto', alpha=0.5)
hist, _, _ = np.histogram2d(x, y, bins=20, range=[[-41.3, 39.2], [-54.3, 24.1]])
        # Guard against empty data to avoid division by zero
        hist_percentage = (hist / hist.sum()) * 100 if hist.sum() != 0 else hist
return ax.imshow(hist_percentage.T, extent=extent, origin='lower', cmap='hot', alpha=0.6)
# Create heatmaps for each position
ims = []
for i, ax in enumerate(axs.flat):
ims.append(create_heatmap(ax, data[i][0], data[i][1]))
ax.set_xlabel('X Coordinate', fontsize=24)
ax.set_ylabel('Y Coordinate', fontsize=24)
ax.set_xlim(-41.3, 39.2)
ax.set_ylim(-54.3, 24.1)
ax.set_title(f'Position: {i+1}', fontsize=24)
ax.tick_params(axis='both', which='major', labelsize=24)
ax.plot(positions[i][0], positions[i][1], 'gx', markersize=20, markeredgewidth=5)
# Find the global maximum percentage for consistent color scaling
vmax = max(im.get_array().max() for im in ims)
# Update color scaling for all heatmaps
for im in ims:
im.set_clim(0, vmax)
# Add colorbar
cbar = plt.colorbar(ims[0], cax=cax, orientation='horizontal')
cbar.set_label('Movement Percentage (%)', fontsize=24)
cbar.ax.tick_params(labelsize=24)
# Adjust layout and save
# Manually adjust layout to avoid overlap and make space for the colorbar
plt.subplots_adjust(left=0.1, right=0.9, top=0.88, bottom=0.05, hspace=0.4, wspace=0.3)
plt.savefig(f"{export_path}/{name}.png", dpi=300)
plt.close()
def plot_triple_heatmap_randersacker(x1, y1, x2, y2, x3, y3, export_path, name, naming="sizes"):
"""
stellt drei Heatmaps für die verschiedenen übergebenen Daten dar für rander.
Dargestellt werden die Heatmaps in Anzahl von drei neben einander.
"""
fig, (ax1, ax2, ax3, cax) = plt.subplots(1, 4, figsize=(24, 6),
gridspec_kw={'width_ratios': [1, 1, 1, 0.05]})
# Load the background image
img = plt.imread('maps/rander_heat_back.png') # Replace with path to your actual image
extent = [-41.3, 39.2, -54.3, 24.1]
# Function to create and plot heatmap
def create_heatmap(ax, x, y):
ax.imshow(img, extent=extent, aspect='auto', alpha=0.5)
hist, _, _ = np.histogram2d(x, y, bins=20, range=[[-41.3, 39.2], [-54.3, 24.1]])
        # Guard against empty data to avoid division by zero
        hist_percentage = (hist / hist.sum()) * 100 if hist.sum() != 0 else hist
return ax.imshow(hist_percentage.T, extent=extent, origin='lower', cmap='hot', alpha=0.6)
# Create heatmaps
im1 = create_heatmap(ax1, x1, y1)
im2 = create_heatmap(ax2, x2, y2)
im3 = create_heatmap(ax3, x3, y3)
# Find the global maximum percentage for consistent color scaling
vmax = max(im1.get_array().max(), im2.get_array().max(), im3.get_array().max())
# Update color scaling
im1.set_clim(0, vmax)
im2.set_clim(0, vmax)
im3.set_clim(0, vmax)
# Adjust axes
for ax in (ax1, ax2, ax3):
ax.set_xlabel('X Coordinate', fontsize=24)
ax.set_ylabel('Y Coordinate', fontsize=24)
ax.set_xlim(-41.3, 39.2)
ax.set_ylim(-54.3, 24.1)
ax.tick_params(axis='both', which='major', labelsize=24)
if naming == "sizes":
ax1.set_title('Incentive Size Small', fontsize=24)
ax2.set_title('Incentive Size Medium', fontsize=24)
ax3.set_title('Incentive Size Large', fontsize=24)
elif naming == "count":
ax1.set_title('Incentive Amount One', fontsize=24)
ax2.set_title('Incentive Amount Two', fontsize=24)
ax3.set_title('Incentive Amount Three', fontsize=24)
# Add colorbar
cbar = plt.colorbar(im3, cax=cax)
cbar.set_label('Movement Percentage (%)', fontsize=24)
cbar.ax.tick_params(labelsize=24)
# Adjust layout and save
plt.tight_layout()
plt.savefig(f"{export_path}/{name}.png", dpi=300)
plt.close()
def plot_triple_heatmap_bremen(x1, y1, x2, y2, x3, y3, export_path, name, naming="sizes"):
"""
stellt drei Heatmaps für die verschiedenen übergebenen Daten dar für bremen.
Dargestellt werden die Heatmaps in Anzahl von drei neben einander.
"""
fig, (ax1, ax2, ax3, cax) = plt.subplots(1, 4, figsize=(24, 6),
gridspec_kw={'width_ratios': [1, 1, 1, 0.05]})
# Load the background image
img = plt.imread('maps/bremen_heat_back.png') # Replace with path to your actual image
extent = [-153.2, 76.5, -79.7, 103.8]
# Function to create and plot heatmap
def create_heatmap(ax, x, y):
ax.imshow(img, extent=extent, aspect='auto', alpha=0.5)
hist, _, _ = np.histogram2d(x, y, bins=20, range=[[-153.2, 76.5], [-79.7, 103.8]])
        # Guard against empty data to avoid division by zero
        hist_percentage = (hist / hist.sum()) * 100 if hist.sum() != 0 else hist
return ax.imshow(hist_percentage.T, extent=extent, origin='lower', cmap='hot', alpha=0.6)
# Create heatmaps
im1 = create_heatmap(ax1, x1, y1)
im2 = create_heatmap(ax2, x2, y2)
im3 = create_heatmap(ax3, x3, y3)
# Find the global maximum percentage for consistent color scaling
vmax = max(im1.get_array().max(), im2.get_array().max(), im3.get_array().max())
# Update color scaling
im1.set_clim(0, vmax)
im2.set_clim(0, vmax)
im3.set_clim(0, vmax)
# Adjust axes
for ax in (ax1, ax2, ax3):
ax.set_xlabel('X Coordinate', fontsize=24)
ax.set_ylabel('Y Coordinate', fontsize=24)
ax.set_xlim(-153.2, 76.5)
ax.set_ylim(-79.7, 103.8)
ax.tick_params(axis='both', which='major', labelsize=24)
if naming == "sizes":
ax1.set_title('Incentive Size Small', fontsize=24)
ax2.set_title('Incentive Size Medium', fontsize=24)
ax3.set_title('Incentive Size Large', fontsize=24)
elif naming == "count":
ax1.set_title('Incentive Amount One', fontsize=24)
ax2.set_title('Incentive Amount Two', fontsize=24)
ax3.set_title('Incentive Amount Three', fontsize=24)
# Add colorbar
cbar = plt.colorbar(im3, cax=cax)
cbar.set_label('Movement Percentage (%)', fontsize=24)
cbar.ax.tick_params(labelsize=24)
# Adjust layout and save
plt.tight_layout()
plt.savefig(f"{export_path}/{name}.png", dpi=300)
plt.close()
def plot_double_heatmap_bremen(x1, y1, x2, y2, export_path, name):
"""
stellt zwei Heatmaps für die verschiedenen übergebenen Daten dar für bremen.
Dargestellt werden die Heatmaps in Anzahl von zwei neben einander.
"""
fig, (ax1, ax2, cax) = plt.subplots(1, 3, figsize=(24, 6),
gridspec_kw={'width_ratios': [1, 1, 0.05]})
# Load the background image
img = plt.imread('maps/bremen_heat_back.png') # Replace with path to your actual image
extent = [-153.2, 76.5, -79.7, 103.8]
# Function to create and plot heatmap
def create_heatmap(ax, x, y):
ax.imshow(img, extent=extent, aspect='auto', alpha=0.5)
hist, _, _ = np.histogram2d(x, y, bins=20, range=[[-153.2, 76.5], [-79.7, 103.8]])
        # Guard against empty data to avoid division by zero
        hist_percentage = (hist / hist.sum()) * 100 if hist.sum() != 0 else hist
return ax.imshow(hist_percentage.T, extent=extent, origin='lower', cmap='hot', alpha=0.6)
# Create heatmaps
im1 = create_heatmap(ax1, x1, y1)
im2 = create_heatmap(ax2, x2, y2)
# Find the global maximum percentage for consistent color scaling
vmax = max(im1.get_array().max(), im2.get_array().max())
# Update color scaling
im1.set_clim(0, vmax)
im2.set_clim(0, vmax)
# Adjust axes
for ax in (ax1, ax2):
ax.set_xlabel('X Coordinate', fontsize=24)
ax.set_ylabel('Y Coordinate', fontsize=24)
ax.set_xlim(-153.2, 76.5)
ax.set_ylim(-79.7, 103.8)
ax.tick_params(axis='both', which='major', labelsize=24)
ax1.set_title('No Emphasis', fontsize=24)
ax2.set_title('Emphasis', fontsize=24)
# Add colorbar
cbar = plt.colorbar(im2, cax=cax)
cbar.set_label('Movement Percentage (%)', fontsize=24)
cbar.ax.tick_params(labelsize=24)
# Adjust layout and save
plt.tight_layout()
plt.savefig(f"{export_path}/{name}.png", dpi=300)
plt.close()
def plot_double_heatmap_randersacker(x1, y1, x2, y2, export_path, name):
"""
stellt zwei Heatmaps für die verschiedenen übergebenen Daten dar für rander.
Dargestellt werden die Heatmaps in Anzahl von zwei neben einander.
"""
fig, (ax1, ax2, cax) = plt.subplots(1, 3, figsize=(24, 6),
gridspec_kw={'width_ratios': [1, 1, 0.05]})
img = plt.imread('maps/rander_heat_back.png') # Replace with path to your actual image
extent = [-41.3, 39.2, -54.3, 24.1]
# Function to create and plot heatmap
def create_heatmap(ax, x, y):
ax.imshow(img, extent=extent, aspect='auto', alpha=0.5)
hist, _, _ = np.histogram2d(x, y, bins=20, range=[[-41.3, 39.2], [-54.3, 24.1]])
        # Guard against empty data to avoid division by zero
        hist_percentage = (hist / hist.sum()) * 100 if hist.sum() != 0 else hist
return ax.imshow(hist_percentage.T, extent=extent, origin='lower', cmap='hot', alpha=0.6)
# Create heatmaps
im1 = create_heatmap(ax1, x1, y1)
im2 = create_heatmap(ax2, x2, y2)
# Find the global maximum percentage for consistent color scaling
vmax = max(im1.get_array().max(), im2.get_array().max())
# Update color scaling
im1.set_clim(0, vmax)
im2.set_clim(0, vmax)
# Adjust axes
for ax in (ax1, ax2):
ax.set_xlabel('X Coordinate', fontsize=24)
ax.set_ylabel('Y Coordinate', fontsize=24)
ax.set_xlim(-41.3, 39.2)
ax.set_ylim(-54.3, 24.1)
ax.tick_params(axis='both', which='major', labelsize=24)
ax1.set_title('No Emphasis', fontsize=24)
ax2.set_title('Emphasis', fontsize=24)
# Add colorbar
cbar = plt.colorbar(im2, cax=cax)
cbar.set_label('Movement Percentage (%)', fontsize=24)
cbar.ax.tick_params(labelsize=24)
# Adjust layout and save
plt.tight_layout()
plt.savefig(f"{export_path}/{name}.png", dpi=300)
plt.close()
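# The double/triple heatmap helpers share one color scale (vmax) across their
# subplots, so densities are directly comparable between conditions, e.g.
# (the input block lists are placeholders):
#
#     x_n, y_n = filter_x_y_from_blocks(no_emphasis_blocks)
#     x_e, y_e = filter_x_y_from_blocks(emphasis_blocks)
#     plot_double_heatmap_randersacker(x_n, y_n, x_e, y_e, "plots", "rander_emphasis")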
def plot_sucessrate_by_element_pos_size(small_elements, medium_elements, large_elements, export_path, stimuli):
    """
    Creates the bar charts for the success rate of the elements by size and position.
    """
plot_data = []
for i in range(len(medium_elements)):
plot_element = {
"position": "",
"small_rate": 0,
"medium_rate": 0,
"large_rate": 0
}
plot_element["position"] = medium_elements[i]["position"]
plot_element["small_rate"] = round(small_elements[i]["success_rate"],2)
plot_element["medium_rate"] = round(medium_elements[i]["success_rate"],2)
plot_element["large_rate"] = round(large_elements[i]["success_rate"],2)
plot_data.append(plot_element)
categories = ('small', 'medium', 'large')
x = np.arange(len(plot_data)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots(layout='constrained', figsize=(12, 6))
    # Bars for the small_rate
    rects1 = ax.bar(x - width, [element['small_rate'] for element in plot_data], width, label='Small', color="#5e4c5f")
    # Bars for the medium_rate
    rects2 = ax.bar(x, [element['medium_rate'] for element in plot_data], width, label='Medium', color='#999999')
    # Bars for the large_rate
    rects3 = ax.bar(x + width, [element['large_rate'] for element in plot_data], width, label='Large', color='#ffbb6f')
    # Add labels and custom x-axis tick labels
    ax.set_ylabel('Success Rate in %', fontsize=16)
    ax.set_xticks(x)
    ax.set_xticklabels(["pos "+str(index+1) for index, element in enumerate(plot_data)], ha='center', fontsize=16)
    ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=16)
    # Save the chart
plt.savefig(export_path+"/"+"success_rate_by_element_size_pos_" + stimuli + ".png", dpi=300)
def create_csv_elements_bremen(obj_bremen, positions_bremen, export_path, csv_name):
    """
    Creates a CSV with per-position appearance counts, correct answers, average time, and success rate for Bremen.
    """
table_obj_bremen = []
for i in positions_bremen:
eval_obj = {
"position": i,
"stimulus": "bremen",
"number_of_appearances": 0,
"number_of_correct": 0,
"average_time": 0,
"success_rate": 0,
}
table_obj_bremen.append(eval_obj)
for i in obj_bremen:
for j in table_obj_bremen:
if i["positionOne"] == j["position"]:
j["number_of_appearances"] += 1
if i["controlLetterOne"] == i["responseOneLetter"]:
j["number_of_correct"] += 1
j["average_time"] += i["total_time_1"]
if i["positionTwo"] == j["position"]:
j["number_of_appearances"] += 1
if i["controlLetterTwo"] == i["responseTwoLetter"]:
j["number_of_correct"] += 1
j["average_time"] += i["total_time_2"]
for i in table_obj_bremen:
i["average_time"] = i["average_time"] / i["number_of_appearances"]
        # Convert the time to minutes
i["average_time"] = (i["average_time"]/1000) / 60
i["success_rate"] = i["number_of_correct"] / i["number_of_appearances"]
    # Build the CSV table with pandas
df = pd.DataFrame(table_obj_bremen)
df.to_csv(export_path+"/"+csv_name+".csv", index=False)
return table_obj_bremen
def create_csv_elements_randersacker(obj_randersacker, positions_randersacker, export_path, csv_name):
    """
    Creates a CSV with per-position appearance counts, correct answers, average time, and success rate for Randersacker.
    """
table_obj_rander = []
for i in positions_randersacker:
eval_obj = {
"position": i,
"stimulus": "randersacker",
"number_of_appearances": 0,
"number_of_correct": 0,
"average_time": 0,
"success_rate": 0,
}
table_obj_rander.append(eval_obj)
for i in obj_randersacker:
for j in table_obj_rander:
if i["positionOne"] == j["position"]:
j["number_of_appearances"] += 1
if i["controlLetterOne"] == i["responseOneLetter"]:
j["number_of_correct"] += 1
j["average_time"] += i["total_time_1"]
if i["positionTwo"] == j["position"]:
j["number_of_appearances"] += 1
if i["controlLetterTwo"] == i["responseTwoLetter"]:
j["number_of_correct"] += 1
j["average_time"] += i["total_time_2"]
for i in table_obj_rander:
i["average_time"] = i["average_time"] / i["number_of_appearances"]
        # Convert the time to minutes
i["average_time"] = (i["average_time"]/1000) / 60
i["success_rate"] = i["number_of_correct"] / i["number_of_appearances"]
df = pd.DataFrame(table_obj_rander)
df.to_csv(export_path+"/"+csv_name+".csv", index=False)
return table_obj_rander
def plot_qoe_acr_rating_by_size(blocks_small, blocks_medium, blocks_large, name, export_path, naming="sizes"):
"""
Erstellt ein Balkendiagramm für die QoE-Bewertung nach Größe / Anzahl.
"""
categories_size = ('small', 'medium', 'large')
categories_reduction = ('Original', 'OCV4', 'OCV8', 'OCV30', 'R3600x1000', 'R2400x667', 'R1200x333')
categories_acr = ('Bad', 'Poor', 'Fair', 'Good', 'Excellent')
def filter_answers(data):
answers = {
'Bad': [0, 0, 0, 0, 0, 0, 0],
'Poor': [0, 0, 0, 0, 0, 0, 0],
'Fair': [0, 0, 0, 0, 0, 0, 0],
'Good': [0, 0, 0, 0, 0, 0, 0],
'Excellent': [0, 0, 0, 0, 0, 0, 0],
}
for i in data:
if i["cloudOne"][2] == "0":
answers[i["responseOneQuality"]][0] += 1
if i["cloudTwo"][2] == "1":
answers[i["responseTwoQuality"]][1] += 1
if i["cloudTwo"][2] == "2":
answers[i["responseTwoQuality"]][2] += 1
if i["cloudTwo"][2] == "3":
answers[i["responseTwoQuality"]][3] += 1
if i["cloudTwo"][2] == "4":
answers[i["responseTwoQuality"]][4] += 1
if i["cloudTwo"][2] == "5":
answers[i["responseTwoQuality"]][5] += 1
if i["cloudTwo"][2] == "6":
answers[i["responseTwoQuality"]][6] += 1
return answers
def normalize_data(data):
for i in range(7):
count = 0
for j in data:
count += data[j][i]
for j in data:
if count == 0:
data[j][i] = 0
else:
data[j][i] = (data[j][i] / count)
for i in data:
data[i] = np.array(data[i])
return data
data_small = filter_answers(blocks_small)
data_medium = filter_answers(blocks_medium)
data_large = filter_answers(blocks_large)
data_small = normalize_data(data_small)
data_medium = normalize_data(data_medium)
data_large = normalize_data(data_large)
x = np.arange(len(categories_reduction))
width = 0.2
fig, ax = plt.subplots(figsize=(13, 7)) # Increase figure size
# Plot bars for data_small
ax.bar(x - width, data_small["Bad"], width, label='Small Bad', color='#A11B1F')
ax.bar(x - width, data_small["Poor"], width, bottom=data_small["Bad"], color='#CB5353')
ax.bar(x - width, data_small["Fair"], width, bottom=np.array(data_small["Bad"]) + np.array(data_small["Poor"]), color='#F5A31D')
ax.bar(x - width, data_small["Good"], width, bottom=np.array(data_small["Bad"]) + np.array(data_small["Poor"]) + np.array(data_small["Fair"]), color='#81C560')
ax.bar(x - width, data_small["Excellent"], width, bottom=np.array(data_small["Bad"]) + np.array(data_small["Poor"]) + np.array(data_small["Fair"]) + np.array(data_small["Good"]), color='#006134')
# Plot bars for data_medium
ax.bar(x, data_medium["Bad"], width, label='Medium Bad', color='#A11B1F')
ax.bar(x, data_medium["Poor"], width, bottom=data_medium["Bad"], color='#CB5353')
ax.bar(x, data_medium["Fair"], width, bottom=np.array(data_medium["Bad"]) + np.array(data_medium["Poor"]), color='#F5A31D')
ax.bar(x, data_medium["Good"], width, bottom=np.array(data_medium["Bad"]) + np.array(data_medium["Poor"]) + np.array(data_medium["Fair"]), color='#81C560')
ax.bar(x, data_medium["Excellent"], width, bottom=np.array(data_medium["Bad"]) + np.array(data_medium["Poor"]) + np.array(data_medium["Fair"]) + np.array(data_medium["Good"]), color='#006134')
# Plot bars for data_large
ax.bar(x + width, data_large["Bad"], width, label='Large Bad', color='#A11B1F')
ax.bar(x + width, data_large["Poor"], width, bottom=data_large["Bad"], color='#CB5353')
ax.bar(x + width, data_large["Fair"], width, bottom=np.array(data_large["Bad"]) + np.array(data_large["Poor"]), color='#F5A31D')
ax.bar(x + width, data_large["Good"], width, bottom=np.array(data_large["Bad"]) + np.array(data_large["Poor"]) + np.array(data_large["Fair"]), color='#81C560')
ax.bar(x + width, data_large["Excellent"], width, bottom=np.array(data_large["Bad"]) + np.array(data_large["Poor"]) + np.array(data_large["Fair"]) + np.array(data_large["Good"]), color='#006134')
# Set x-ticks to align with each bar
xticks_positions = np.concatenate([x - width, x, x + width])
if naming == "sizes":
xticks_labels = [f'{label} (S)' for label in categories_reduction] + \
[f'{label} (M)' for label in categories_reduction] + \
[f'{label} (L)' for label in categories_reduction]
elif naming == "count":
xticks_labels = [f'{label} (1)' for label in categories_reduction] + \
[f'{label} (2)' for label in categories_reduction] + \
[f'{label} (3)' for label in categories_reduction]
ax.set_xticks(xticks_positions)
ax.set_xticklabels(xticks_labels, rotation=90)
ax.set_ylim(0, 1)
ax.set_ylabel('Quality Rating in %', fontsize=12)
ax.legend(categories_acr, loc='upper left', bbox_to_anchor=(1, 1))
    # Add vertical lines to separate the groups
    for xi in x:
        ax.vlines(xi - width / 2, ymin=ax.get_ylim()[0], ymax=1, colors='black', linestyles='--', linewidth=1)
        ax.vlines(xi + width / 2, ymin=ax.get_ylim()[0], ymax=1, colors='black', linestyles='--', linewidth=1)
# Adjust layout to make space for labels
plt.subplots_adjust(bottom=0.3)
plt.savefig(f"{export_path}/{name}.png", dpi=300)
def plot_qoe_acr_rating_two_inputs(blocks_first, blocks_sec, name, export_path, naming="sizes"):
    """
    Creates a stacked bar chart of the QoE ratings for two input conditions.
    """
categories_reduction = ('Original', 'OCV4', 'OCV8', 'OCV30', 'R3600x1000', 'R2400x667', 'R1200x333')
categories_acr = ('Bad', 'Poor', 'Fair', 'Good', 'Excellent')
def filter_answers(data):
answers = {
'Bad': [0, 0, 0, 0, 0, 0, 0],
'Poor': [0, 0, 0, 0, 0, 0, 0],
'Fair': [0, 0, 0, 0, 0, 0, 0],
'Good': [0, 0, 0, 0, 0, 0, 0],
'Excellent': [0, 0, 0, 0, 0, 0, 0],
}
for i in data:
if i["cloudOne"][2] == "0":
answers[i["responseOneQuality"]][0] += 1
if i["cloudTwo"][2] == "1":
answers[i["responseTwoQuality"]][1] += 1
if i["cloudTwo"][2] == "2":
answers[i["responseTwoQuality"]][2] += 1
if i["cloudTwo"][2] == "3":
answers[i["responseTwoQuality"]][3] += 1
if i["cloudTwo"][2] == "4":
answers[i["responseTwoQuality"]][4] += 1
if i["cloudTwo"][2] == "5":
answers[i["responseTwoQuality"]][5] += 1
if i["cloudTwo"][2] == "6":
answers[i["responseTwoQuality"]][6] += 1
return answers
def normalize_data(data):
for i in range(7):
count = 0
for j in data:
count += data[j][i]
for j in data:
if count == 0:
data[j][i] = 0
else:
data[j][i] = (data[j][i] / count)
for i in data:
data[i] = np.array(data[i])
return data
data_small = filter_answers(blocks_first)
data_medium = filter_answers(blocks_sec)
data_small = normalize_data(data_small)
data_medium = normalize_data(data_medium)
x = np.arange(len(categories_reduction))
width = 0.2
fig, ax = plt.subplots(figsize=(13, 7)) # Increase figure size
# Plot bars for data_small
ax.bar(x - width, data_small["Bad"], width, label='Small Bad', color='#A11B1F')
ax.bar(x - width, data_small["Poor"], width, bottom=data_small["Bad"], color='#CB5353')
ax.bar(x - width, data_small["Fair"], width, bottom=np.array(data_small["Bad"]) + np.array(data_small["Poor"]), color='#F5A31D')
ax.bar(x - width, data_small["Good"], width, bottom=np.array(data_small["Bad"]) + np.array(data_small["Poor"]) + np.array(data_small["Fair"]), color='#81C560')
ax.bar(x - width, data_small["Excellent"], width, bottom=np.array(data_small["Bad"]) + np.array(data_small["Poor"]) + np.array(data_small["Fair"]) + np.array(data_small["Good"]), color='#006134')
# Plot bars for data_medium
ax.bar(x, data_medium["Bad"], width, label='Medium Bad', color='#A11B1F')
ax.bar(x, data_medium["Poor"], width, bottom=data_medium["Bad"], color='#CB5353')
ax.bar(x, data_medium["Fair"], width, bottom=np.array(data_medium["Bad"]) + np.array(data_medium["Poor"]), color='#F5A31D')
ax.bar(x, data_medium["Good"], width, bottom=np.array(data_medium["Bad"]) + np.array(data_medium["Poor"]) + np.array(data_medium["Fair"]), color='#81C560')
ax.bar(x, data_medium["Excellent"], width, bottom=np.array(data_medium["Bad"]) + np.array(data_medium["Poor"]) + np.array(data_medium["Fair"]) + np.array(data_medium["Good"]), color='#006134')
# Set x-ticks to align with each bar
xticks_positions = np.concatenate([x - width, x])
if naming == "sizes":
xticks_labels = [f'{label} (S)' for label in categories_reduction] + \
[f'{label} (M)' for label in categories_reduction]
elif naming == "count":
xticks_labels = [f'{label} (1)' for label in categories_reduction] + \
[f'{label} (2)' for label in categories_reduction]
elif naming == "emph":
xticks_labels = [f'{label} (E)' for label in categories_reduction] + \
[f'{label} (N)' for label in categories_reduction]
ax.set_xticks(xticks_positions)
ax.set_xticklabels(xticks_labels, rotation=90)
ax.set_ylim(0, 1)
ax.set_ylabel('Quality Rating in %', fontsize=12)
ax.legend(categories_acr, loc='upper left', bbox_to_anchor=(1, 1))
    # Add vertical lines to separate the groups
    for xi in x:
        ax.vlines(xi - width / 2, ymin=ax.get_ylim()[0], ymax=1, colors='black', linestyles='--', linewidth=1)
        ax.vlines(xi + width / 2, ymin=ax.get_ylim()[0], ymax=1, colors='black', linestyles='--', linewidth=1)
# Adjust layout to make space for labels
plt.subplots_adjust(bottom=0.3)
plt.savefig(f"{export_path}/{name}.png", dpi=300)
def plot_qoe_acr_rating_bar(blocks_small, blocks_medium, blocks_large, name, export_path, naming="sizes"):
    """
    Creates a bar chart of the average ACR rating per reduction level, with 95% confidence intervals.
    """
categories_reduction = ('Original', 'OCV4', 'OCV8', 'OCV30', 'R3600x1000', 'R2400x667', 'R1200x333')
categories_acr = ('Small', 'Medium', 'Large')
categories_acr_amount = ("One", "Two", "Three")
def filter_answers(data):
answers = {
'org': [0, 0, 0, 0, 0],
'ocv4': [0, 0, 0, 0, 0],
'ocv8': [0, 0, 0, 0, 0],
'ocv30': [0, 0, 0, 0, 0],
'r3600x1000': [0, 0, 0, 0, 0],
'r2400x667': [0, 0, 0, 0, 0],
'r1200x333': [0, 0, 0, 0, 0]
}
def map_acr(acr):
if acr == "Bad":
return 0
if acr == "Poor":
return 1
if acr == "Fair":
return 2
if acr == "Good":
return 3
if acr == "Excellent":
return 4
for i in data:
if i["cloudOne"][2] == "0":
answers["org"][map_acr(i["responseOneQuality"])] += 1
if i["cloudTwo"][2] == "1":
answers["ocv4"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "2":
answers["ocv8"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "3":
answers["ocv30"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "4":
answers["r3600x1000"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "5":
answers["r2400x667"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "6":
answers["r1200x333"][map_acr(i["responseTwoQuality"])] += 1
return answers
data_small = filter_answers(blocks_small)
data_medium = filter_answers(blocks_medium)
data_large = filter_answers(blocks_large)
def calc_bar_value(arr):
ratings = 0
count = 0
for i in range(len(arr)):
count += arr[i]
ratings += arr[i] * (i+1)
if count == 0:
return 0
else:
return ratings/count
def calculate_yerr(data, confidence_level=0.95):
def expand_array(arr):
# Initialize an empty list to store the expanded values
expanded_list = []
# Iterate over each value in the input array
for i, value in enumerate(arr):
# Extend the expanded_list with the current value repeated 'value' times
expanded_list.extend([i+1] * value)
return expanded_list
array = expand_array(data)
array = np.array(array)
if len(array) <= 1:
return np.nan
# Compute mean and standard deviation
mean = np.mean(array)
std_dev = np.std(array, ddof=1) # Sample standard deviation
n = len(array)
# Compute the standard error of the mean
standard_error = std_dev / np.sqrt(n)
# Determine the t-value for the given confidence level
degrees_of_freedom = n - 1
t_value = stats.t.ppf((1 + confidence_level) / 2, degrees_of_freedom)
# Calculate margin of error
margin_of_error = t_value * standard_error
return margin_of_error
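    # calculate_yerr returns the half-width of a t-based confidence interval:
    # margin = t_{(1+c)/2, n-1} * s / sqrt(n), where s is the sample standard
    # deviation of the expanded ratings and n their count.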
x = np.arange(len(categories_reduction))
width = 0.2
fig, ax = plt.subplots(figsize=(13, 7)) # Increase figure size
# Plot bars for org
keys = ["org", "ocv4", "ocv8", "ocv30", "r3600x1000", "r2400x667", "r1200x333"]
    # Compute the values and confidence intervals for each category
bar_small = [calc_bar_value(data_small[key]) for key in keys]
bar_medium = [calc_bar_value(data_medium[key]) for key in keys]
bar_large = [calc_bar_value(data_large[key]) for key in keys]
ci_small = [calculate_yerr(data_small[key]) for key in keys]
ci_medium = [calculate_yerr(data_medium[key]) for key in keys]
ci_large = [calculate_yerr(data_large[key]) for key in keys]
bars_small = ax.bar(x - width, bar_small, width, label='Small', color='#5e4c5f',yerr=ci_small, capsize=5)
bars_medium = ax.bar(x, bar_medium, width, label='Medium', color='#999999',yerr=ci_medium, capsize=5)
bars_large = ax.bar(x + width, bar_large, width, label='Large', color='#ffbb6f',yerr=ci_large, capsize=5)
# Set x-ticks to align with each bar
xticks_positions = np.concatenate([x - width, x, x + width])
if naming == "sizes":
xticks_labels = [f'{label} (S)' for label in categories_reduction] + \
[f'{label} (M)' for label in categories_reduction] + \
[f'{label} (L)' for label in categories_reduction]
elif naming == "count":
xticks_labels = [f'{label} (1)' for label in categories_reduction] + \
[f'{label} (2)' for label in categories_reduction] + \
[f'{label} (3)' for label in categories_reduction]
ax.set_xticks(xticks_positions)
ax.set_xticklabels(xticks_labels, rotation=90)
ax.set_ylim(0, 5)
ax.set_ylabel('Average ACR Rating', fontsize=12)
if naming == "sizes":
ax.legend(categories_acr, loc='upper left', bbox_to_anchor=(1, 1))
elif naming == "count":
ax.legend(categories_acr_amount, loc='upper left', bbox_to_anchor=(1, 1))
# Adjust layout to make space for labels
plt.subplots_adjust(bottom=0.3)
plt.tight_layout()
plt.savefig(f"{export_path}/{name}.png", dpi=300)
def plot_qoe_acr_rating_bar_emphasis(blocks_e, blocks_ne, name, export_path):
    """
    Creates a bar chart of the average ACR rating per reduction level for the emphasis vs. no-emphasis condition.
    """
categories_reduction = ('Original', 'OCV4', 'OCV8', 'OCV30', 'R3600x1000', 'R2400x667', 'R1200x333')
categories_acr = ('Emphasis', 'No Emphasis')
def filter_answers(data):
answers = {
'org': [0, 0, 0, 0, 0],
'ocv4': [0, 0, 0, 0, 0],
'ocv8': [0, 0, 0, 0, 0],
'ocv30': [0, 0, 0, 0, 0],
'r3600x1000': [0, 0, 0, 0, 0],
'r2400x667': [0, 0, 0, 0, 0],
'r1200x333': [0, 0, 0, 0, 0]
}
def map_acr(acr):
if acr == "Bad":
return 0
if acr == "Poor":
return 1
if acr == "Fair":
return 2
if acr == "Good":
return 3
if acr == "Excellent":
return 4
for i in data:
if i["cloudOne"][2] == "0":
answers["org"][map_acr(i["responseOneQuality"])] += 1
if i["cloudTwo"][2] == "1":
answers["ocv4"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "2":
answers["ocv8"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "3":
answers["ocv30"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "4":
answers["r3600x1000"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "5":
answers["r2400x667"][map_acr(i["responseTwoQuality"])] += 1
if i["cloudTwo"][2] == "6":
answers["r1200x333"][map_acr(i["responseTwoQuality"])] += 1
return answers
data_small = filter_answers(blocks_e)
data_medium = filter_answers(blocks_ne)
def calc_bar_value(arr):
ratings = 0
count = 0
for i in range(len(arr)):
count += arr[i]
ratings += arr[i] * (i+1)
if count == 0:
return 0
else:
return ratings/count
def calculate_yerr(data, confidence_level=0.95):
def expand_array(arr):
# Initialize an empty list to store the expanded values
expanded_list = []
# Iterate over each value in the input array
for i, value in enumerate(arr):
# Extend the expanded_list with the current value repeated 'value' times
expanded_list.extend([i+1] * value)
return expanded_list
array = expand_array(data)
array = np.array(array)
if len(array) <= 1:
return np.nan
# Compute mean and standard deviation
mean = np.mean(array)
std_dev = np.std(array, ddof=1) # Sample standard deviation
n = len(array)
# Compute the standard error of the mean
standard_error = std_dev / np.sqrt(n)
# Determine the t-value for the given confidence level
degrees_of_freedom = n - 1
t_value = stats.t.ppf((1 + confidence_level) / 2, degrees_of_freedom)
# Calculate margin of error
margin_of_error = t_value * standard_error
return margin_of_error
x = np.arange(len(categories_reduction))
width = 0.2
fig, ax = plt.subplots(figsize=(13, 7)) # Increase figure size
# Plot bars for org
keys = ["org", "ocv4", "ocv8", "ocv30", "r3600x1000", "r2400x667", "r1200x333"]
    # Compute the values and confidence intervals for each category
bar_small = [calc_bar_value(data_small[key]) for key in keys]
bar_medium = [calc_bar_value(data_medium[key]) for key in keys]
ci_small = [calculate_yerr(data_small[key]) for key in keys]
ci_medium = [calculate_yerr(data_medium[key]) for key in keys]
    bars_e = ax.bar(x - width, bar_small, width, label='Emphasis', color='#5e4c5f', yerr=ci_small, capsize=5)
    bars_ne = ax.bar(x, bar_medium, width, label='No Emphasis', color='#ffbb6f', yerr=ci_medium, capsize=5)
# Set x-ticks to align with each bar
xticks_positions = np.concatenate([x - width, x])
xticks_labels = [f'{label} (E)' for label in categories_reduction] + \
[f'{label} (N)' for label in categories_reduction]
ax.set_xticks(xticks_positions)
ax.set_xticklabels(xticks_labels, rotation=90)
ax.set_ylim(0, 5)
ax.set_ylabel('Average ACR Rating', fontsize=12)
ax.legend(categories_acr, loc='upper left', bbox_to_anchor=(1, 1))
# Adjust layout to make space for labels
plt.subplots_adjust(bottom=0.3)
plt.tight_layout()
plt.savefig(f"{export_path}/{name}.png", dpi=300)
def plot_bar_avg_movement(blocks_small, blocks_medium, blocks_large, name, export_path, naming="sizes"):
    """
    Creates a bar chart of the average movement amount per stimulus, with 95% confidence intervals.
    """
def create_movement_values_arrays(blocks):
movement_values_bremen = []
movement_values_rander = []
for i in blocks:
combined_val_bremen = 0
combined_val_rander = 0
for j in i["data"]:
val = len(j["perspectives01"][0]) + len(j["perspectives02"][0])
if j["cloudOne"][1] == "0":
combined_val_bremen += val
else:
combined_val_rander += val
movement_values_bremen.append(combined_val_bremen)
movement_values_rander.append(combined_val_rander)
return movement_values_bremen, movement_values_rander
def create_bar_data(arr):
val = 0
for i in arr:
val += i
return val/len(arr)
def calculate_yerr(arr, confidence_level=0.95):
array = np.array(arr)
if len(array) <= 1:
return np.nan
# Compute mean and standard deviation
mean = np.mean(array)
std_dev = np.std(array, ddof=1) # Sample standard deviation
n = len(array)
# Compute the standard error of the mean
standard_error = std_dev / np.sqrt(n)
# Determine the t-value for the given confidence level
degrees_of_freedom = n - 1
t_value = stats.t.ppf((1 + confidence_level) / 2, degrees_of_freedom)
# Calculate margin of error
margin_of_error = t_value * standard_error
return margin_of_error
small_data = create_movement_values_arrays(blocks_small)
medium_data = create_movement_values_arrays(blocks_medium)
large_data = create_movement_values_arrays(blocks_large)
    # Bar width
    width = 0.2
    # x positions for the bars
    x = np.arange(2)  # Two categories: Randersacker and Bremen
    # Draw the bars
    fig, ax = plt.subplots(figsize=(13, 7))  # Increase figure size
bars_rander_small = ax.bar(x[0] - width, create_bar_data(small_data[1]), width, label='Small', yerr=calculate_yerr(small_data[1]),color='#5e4c5f', capsize=5)
bars_rander_medium = ax.bar(x[0], create_bar_data(medium_data[1]), width, label='Medium', yerr=calculate_yerr(medium_data[1]),color='#999999', capsize=5)
bars_rander_large = ax.bar(x[0] + width, create_bar_data(large_data[1]), width,yerr=calculate_yerr(large_data[1]), label='Large', color='#ffbb6f', capsize=5)
bars_bremen_small = ax.bar(x[1] - width, create_bar_data(small_data[0]), width,yerr=calculate_yerr(small_data[0]), color='#5e4c5f', capsize=5)
bars_bremen_medium = ax.bar(x[1], create_bar_data(medium_data[0]),width,yerr=calculate_yerr(medium_data[0]),color='#999999', capsize=5)
bars_bremen_large = ax.bar(x[1] + width, create_bar_data(large_data[0]), width,yerr=calculate_yerr(large_data[0]), color='#ffbb6f', capsize=5)
# Set x-ticks to align with each category
ax.set_xticks(x)
ax.set_xticklabels(['Randersacker', 'Bremen'], rotation=0, fontsize=16)
ax.set_ylim(0, 4200)
ax.set_ylabel('Movement Amount', fontsize=16)
    # Add the legend only once
if naming == "sizes":
ax.legend(['Small', 'Medium', 'Large'], loc='upper left', bbox_to_anchor=(1, 1), fontsize=16)
elif naming == "count":
ax.legend(['One', 'Two', 'Three'], loc='upper left', bbox_to_anchor=(1, 1), fontsize=16)
# Adjust layout to make space for labels
plt.subplots_adjust(bottom=0.3)
plt.tight_layout()
    # Save the chart
plt.savefig(f"{export_path}/{name}.png", dpi=300)
def plot_bar_avg_emphais(blocks_small, blocks_medium, name, export_path):
    """
    Creates a bar chart of the average movement amount for the emphasis vs. no-emphasis condition.
    """
def create_movement_values_arrays(blocks):
movement_values_bremen = []
movement_values_rander = []
for i in blocks:
combined_val_bremen = 0
combined_val_rander = 0
for j in i["data"]:
val = len(j["perspectives01"][0]) + len(j["perspectives02"][0])
if j["cloudOne"][1] == "0":
combined_val_bremen += val
else:
combined_val_rander += val
movement_values_bremen.append(combined_val_bremen)
movement_values_rander.append(combined_val_rander)
return movement_values_bremen, movement_values_rander
    def create_bar_data(arr):
        # Mean value across participants (bar height).
        return sum(arr) / len(arr)
    def calculate_yerr(arr, confidence_level=0.95):
        # Half-width of the t-based confidence interval around the mean,
        # used as the error-bar size.
        array = np.array(arr)
        if len(array) <= 1:
            return np.nan
        std_dev = np.std(array, ddof=1)  # sample standard deviation
        n = len(array)
        # Standard error of the mean
        standard_error = std_dev / np.sqrt(n)
        # t-value for the given confidence level
        degrees_of_freedom = n - 1
        t_value = stats.t.ppf((1 + confidence_level) / 2, degrees_of_freedom)
        # Margin of error
        margin_of_error = t_value * standard_error
        return margin_of_error
small_data = create_movement_values_arrays(blocks_small)
medium_data = create_movement_values_arrays(blocks_medium)
    # Bar width
    width = 0.2
    # x positions for the bars
    x = np.arange(2)  # two categories: Randersacker and Bremen
    # Draw the bars
fig, ax = plt.subplots(figsize=(13, 7)) # Increase figure size
    bars_rander_small = ax.bar(x[0] - width/2, create_bar_data(small_data[1]), width, yerr=calculate_yerr(small_data[1]), label='Small', color='#5e4c5f', capsize=5)
    bars_rander_medium = ax.bar(x[0] + width/2, create_bar_data(medium_data[1]), width, yerr=calculate_yerr(medium_data[1]), label='Medium', color='#ffbb6f', capsize=5)
    bars_bremen_small = ax.bar(x[1] - width/2, create_bar_data(small_data[0]), width, yerr=calculate_yerr(small_data[0]), color='#5e4c5f', capsize=5)
    bars_bremen_medium = ax.bar(x[1] + width/2, create_bar_data(medium_data[0]), width, yerr=calculate_yerr(medium_data[0]), color='#ffbb6f', capsize=5)
# Set x-ticks to align with each category
ax.set_xticks(x)
ax.set_xticklabels(['Randersacker', 'Bremen'], rotation=0, fontsize=16)
ax.set_ylim(0, 4200)
ax.set_ylabel('Movement Amount', fontsize=16)
    # Add the legend only once
ax.legend(['Emphasis', 'No Emphasis'], loc='upper left', bbox_to_anchor=(1, 1), fontsize=16)
# Adjust layout to make space for labels
plt.subplots_adjust(bottom=0.3)
plt.tight_layout()
    # Save the chart
plt.savefig(f"{export_path}/{name}.png", dpi=300)
def build_csv_movement_stats(blocks_small, blocks_medium, blocks_large, name, export_path, naming="sizes"):
bremen_small = get_bremen_blocks(blocks_small)
bremen_medium = get_bremen_blocks(blocks_medium)
bremen_large = get_bremen_blocks(blocks_large)
rander_small = get_randersacker_blocks(blocks_small)
rander_medium = get_randersacker_blocks(blocks_medium)
rander_large = get_randersacker_blocks(blocks_large)
xb_small, yb_small = filter_x_y_from_blocks(bremen_small)
xb_medium, yb_medium = filter_x_y_from_blocks(bremen_medium)
xb_large, yb_large = filter_x_y_from_blocks(bremen_large)
xr_small, yr_small = filter_x_y_from_blocks(rander_small)
xr_medium, yr_medium = filter_x_y_from_blocks(rander_medium)
xr_large, yr_large = filter_x_y_from_blocks(rander_large)
if naming == "sizes":
data_bremen_small = {
"stimulus": "bremen_small",
"particpants": len(blocks_small),
"absolut_movement": len(xb_small),
"average_movement": len(xb_small) / len(blocks_small),
}
data_bremen_medium = {
"stimulus": "bremen_medium",
"particpants": len(blocks_medium),
"absolut_movement": len(xb_medium),
"average_movement": len(xb_medium) / len(blocks_medium),
}
data_bremen_large = {
"stimulus": "bremen_large",
"particpants": len(blocks_large),
"absolut_movement": len(xb_large),
"average_movement": len(xb_large) / len(blocks_large),
}
data_rander_small = {
"stimulus": "randersacker_small",
"particpants": len(blocks_small),
"absolut_movement": len(xr_small),
"average_movement": len(xr_small) / len(blocks_small),
}
data_rander_medium = {
"stimulus": "randersacker_medium",
"particpants": len(blocks_medium),
"absolut_movement": len(xr_medium),
"average_movement": len(xr_medium)/ len(blocks_medium),
}
data_rander_large = {
"stimulus": "randersacker_large",
"particpants": len(blocks_large),
"absolut_movement": len(xr_large),
"average_movement": len(xr_large) / len(blocks_large),
}
elif naming == "count":
data_bremen_small = {
"stimulus": "bremen_one",
"particpants": len(blocks_small),
"absolut_movement": len(xb_small),
"average_movement": len(xb_small) / len(blocks_small),
}
data_bremen_medium = {
"stimulus": "bremen_two",
"particpants": len(blocks_medium),
"absolut_movement": len(xb_medium),
"average_movement": len(xb_medium) / len(blocks_medium),
}
data_bremen_large = {
"stimulus": "bremen_three",
"particpants": len(blocks_large),
"absolut_movement": len(xb_large),
"average_movement": len(xb_large) / len(blocks_large),
}
data_rander_small = {
"stimulus": "randersacker_one",
"particpants": len(blocks_small),
"absolut_movement": len(xr_small),
"average_movement": len(xr_small) / len(blocks_small),
}
data_rander_medium = {
"stimulus": "randersacker_two",
"particpants": len(blocks_medium),
"absolut_movement": len(xr_medium),
"average_movement": len(xr_medium)/ len(blocks_medium),
}
data_rander_large = {
"stimulus": "randersacker_three",
"particpants": len(blocks_large),
"absolut_movement": len(xr_large),
"average_movement": len(xr_large) / len(blocks_large),
}
data_all = [data_bremen_small, data_bremen_medium, data_bremen_large, data_rander_small, data_rander_medium, data_rander_large]
df = pd.DataFrame(data_all)
df.to_csv(export_path+"/"+name+".csv", index=False)
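
# build_csv_movement_stats writes one row per stimulus/condition combination
# with the header: stimulus,participants,absolute_movement,average_movement.
# average_movement = absolute_movement / participants, i.e. recorded
# perspective entries per participant.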
def build_csv_stimuli_success(blocks, name, export_path):
bremen_blocks = get_bremen_blocks(blocks)
rander_blocks = get_randersacker_blocks(blocks)
bremen_success = 0
rander_success = 0
for i in bremen_blocks:
if i["controlLetterOne"] == i["responseOneLetter"]:
bremen_success += 1
if i["controlLetterTwo"] == i["responseTwoLetter"]:
bremen_success += 1
for i in rander_blocks:
if i["controlLetterOne"] == i["responseOneLetter"]:
rander_success += 1
if i["controlLetterTwo"] == i["responseTwoLetter"]:
rander_success += 1
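    # Each block contributes two control-letter checks, so the maximum
    # success count per block is 2; the rates below are normalized by
    # len(blocks) * 2 accordingly.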
data = {
"stimulus": "bremen",
"amount_blocks": len(bremen_blocks),
"success_rate": bremen_success / (len(bremen_blocks) * 2),
}
data2 = {
"stimulus": "randersacker",
"amount_blocks": len(rander_blocks),
"success_rate": rander_success / (len(rander_blocks) * 2),
}
data_all = [data, data2]
df = pd.DataFrame(data_all)
df.to_csv(export_path+"/"+name+".csv", index=False)
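
# Hypothetical usage (path and file names are assumptions):
# build_csv_stimuli_success(get_eval_blocks("study_data/sizes_small"),
#                           "stimuli_success_small", "export")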
def build_csv_positions_permutations(blocks, position_bremen, position_rander, name, export_path, amount_positions=2):
position_combo = []
position_permuation_stats_bremen = []
position_permuation_stats_rander = []
    def permutation_checker(arr, combo):
        # True if any already-recorded combo in arr matches every position of
        # combo (order-insensitive on both levels).
        for sublist in arr:
            count = 0
            for item in sublist:
                for k in combo:
                    if permutation_checker_two_arrays(item, k):
                        count += 1
            if count == len(combo):
                return True
        return False

    def permutation_checker_two_arrays(arr1, arr2):
        # Order-insensitive equality of two coordinate arrays.
        if len(arr1) != len(arr2):
            return False
        return sorted(arr1) == sorted(arr2)

    def update_stats(stats_list, combo, succ):
        # Increment the counters of an already-recorded position combo.
        for k in stats_list:
            if permutation_checker_two_arrays(k["position"], combo):
                k["appearance"] += 1
                if succ:
                    k["successful"] += 1
                k["success_rate"] = round(k["successful"] / k["appearance"] * 100, 2)
for i in blocks:
for j in i["data"]:
if amount_positions == 2:
combo1 = [j["positionOne"], j["positionTwo"]]
succ1 = False
if j["controlLetterOne"] == j["responseOneLetter"] and j["controlLetterTwo"] == j["responseTwoLetter"]:
succ1 = True
combo2 = [j["positionThree"], j["positionFour"]]
succ2 = False
if j["controlLetterThree"] == j["responseThreeLetter"] and j["controlLetterFour"] == j["responseFourLetter"]:
succ2 = True
elif amount_positions == 3:
combo1 = [j["positionOne"], j["positionTwo"], j["positionThree"]]
succ1 = False
if j["controlLetterOne"] == j["responseOneLetter"] and j["controlLetterTwo"] == j["responseTwoLetter"] and j["controlLetterThree"] == j["responseThreeLetter"]:
succ1 = True
combo2 = [j["positionFour"], j["positionFive"], j["positionSix"]]
succ2 = False
if j["controlLetterFour"] == j["responseFourLetter"] and j["controlLetterFive"] == j["responseFiveLetter"] and j["controlLetterSix"] == j["responseSixLetter"]:
succ2 = True
            eval1 = {
                "position": combo1,
                "appearance": 1,
                "successful": 1 if succ1 else 0,
                "success_rate": 100 if succ1 else 0,
            }
            eval2 = {
                "position": combo2,
                "appearance": 1,
                "successful": 1 if succ2 else 0,
                "success_rate": 100 if succ2 else 0,
            }
            is_bremen = j["cloudOne"][1] == "0"
            # position_combo is shared across both stimuli; the per-stimulus
            # stats lists are maintained separately.
            for combo, evaluation, succ in ((combo1, eval1, succ1), (combo2, eval2, succ2)):
                if not permutation_checker(position_combo, combo):
                    position_combo.append(combo)
                    if is_bremen:
                        position_permuation_stats_bremen.append(evaluation)
                    else:
                        position_permuation_stats_rander.append(evaluation)
                else:
                    if is_bremen:
                        update_stats(position_permuation_stats_bremen, combo, succ)
                    else:
                        update_stats(position_permuation_stats_rander, combo, succ)
for stat in position_permuation_stats_bremen:
for idx, pos in enumerate(stat["position"]):
for p, position in enumerate(position_bremen):
if position == pos:
stat["position"][idx] = "pos " + str(p+1)
for stat in position_permuation_stats_rander:
for idx, pos in enumerate(stat["position"]):
for p, position in enumerate(position_rander):
if position == pos:
stat["position"][idx] = "pos " + str(p+1)
    df = pd.DataFrame(position_permuation_stats_bremen)
    df = df.sort_values(by=['success_rate'], ascending=False)
    df.to_csv(export_path+"/"+name+"_bremen.csv", index=False)
    df = pd.DataFrame(position_permuation_stats_rander)
    df = df.sort_values(by=['success_rate'], ascending=False)
    df.to_csv(export_path+"/"+name+"_rander.csv", index=False)
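    # In addition to the CSVs, render the same stats as PNG tables.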
def save_as_png(data, name, export_path):
df = pd.DataFrame(data)
        df = df.sort_values(by=['success_rate'], ascending=False)
df.insert(0, 'Index', range(1, len(df) + 1))
if amount_positions == 2:
fig, ax = plt.subplots(figsize=(10, 5))
elif amount_positions == 3:
fig, ax = plt.subplots(figsize=(15, 5))
# Hide axes
ax.axis('tight')
ax.axis('off')
# Create the table with a larger font size
table = ax.table(cellText=df.values, colLabels=df.columns, cellLoc='center', loc='center')
# Set the font size for the table
table.auto_set_font_size(False)
table.set_fontsize(8)
        # Adjust the column widths
        col_widths = [0.1, 0.2, 0.3, 0.4, 0.5]  # example column widths
for i, width in enumerate(col_widths):
table.auto_set_column_width(i)
for key, cell in table.get_celld().items():
if key[1] == i:
cell.set_width(width)
# Save the figure as a PNG file
plt.savefig(export_path+"/"+name+".png", bbox_inches='tight', dpi=300)
save_as_png(position_permuation_stats_bremen, name+"_bremen", export_path)
save_as_png(position_permuation_stats_rander, name+"_rander", export_path)
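
# Hypothetical usage sketch (position lists and paths are assumptions; the
# reference positions must match the coordinates stored in the blocks):
# positions_bremen = [[0, 0], [10, 5]]
# positions_rander = [[0, 0], [12, 4]]
# build_csv_positions_permutations(blocks, positions_bremen, positions_rander,
#                                  "position_permutations", "export",
#                                  amount_positions=2)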