python
This commit is contained in:
Andreas Wilms
2025-09-08 16:25:55 +02:00
commit 78481ca337
617 changed files with 345831 additions and 0 deletions

BIN
Auswertung/.DS_Store vendored Normal file

Binary file not shown.

276
Auswertung/controller.py Normal file
View File

@@ -0,0 +1,276 @@
import data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import json
# Base directory of the analysis; backslashes are normalised so the derived
# paths also work when the script runs on Windows.
path_main: str = os.getcwd().replace("\\", "/")

# Export directories, one per results chapter.
path_export = f"{path_main}/results_incentive_sizes"
path_export_count = f"{path_main}/results_incentive_amount"
path_export_no_emphasis = f"{path_main}/results_no_emphasis"

# Import paths for the different studies
folder_surveydata_basic: str = "surveydata_archive/surveydata_basic"
path_surveydata_basic: str = f"{path_main}/{folder_surveydata_basic}"
folder_surveydata_basic_element_large: str = "surveydata_archive/surveydata_basic_element_large"
path_surveydata_large: str = f"{path_main}/{folder_surveydata_basic_element_large}"
folder_surveydata_basic_element_small: str = "surveydata_archive/surveydata_basic_element_small"
path_surveydata_small: str = f"{path_main}/{folder_surveydata_basic_element_small}"
folder_surveydata_basic_2Elements: str = "surveydata_archive/surveydata_basic_2_elements"
path_surveydata_basic_2Elements: str = f"{path_main}/{folder_surveydata_basic_2Elements}"
folder_surveydata_basic_3Elements: str = "surveydata_archive/surveydata_basic_3_elements"
path_surveydata_basic_3Elements: str = f"{path_main}/{folder_surveydata_basic_3Elements}"
folder_surveydata_basic_no_emphasis: str = "surveydata_archive/surveydata_basic_no_emphasis"
path_surveydata_basic_no_emphasis: str = f"{path_main}/{folder_surveydata_basic_no_emphasis}"

# Parameters for the analysis
max_blurs: int = 4             # max allowed window-blur events per block
max_wrong_elements: int = 3    # max allowed wrongly answered elements per block

# Element positions per stimulus as [x, y, z] triples.
positions_bremen = [[13, 13.5, 17], [31, 50, 2], [-65, -7, 9], [-40, -68, 3], [-35, 64, 4], [-5, 76, 1], [-18, 30, 0],[-15, -27, 0]]
positions_randersacker = [[-6, -7, 0.6],[-4.5, -18, 4],[-13, -33, -0.5],[-6, -14.5, -0.5], [8, -0.4, 1], [1.7, -8, 7.3]]
# Functions for cleaning the data - only leaves first entry of a worker across all studies
def clean_all_multiple_workers_in_order():
    """Keep only each worker's first submission across all studies.

    The basic study ran first, so its workers seed the set of known workers.
    Every later study is then scanned in the order the studies were
    conducted; any survey JSON file whose last record belongs to an
    already-seen worker is deleted from disk, so only the earliest entry
    per worker survives.

    Side effects: permanently deletes JSON files from the study folders.
    """
    # Seed with the workers of the first (basic) study.
    workers = [obj["worker"] for obj in data.get_eval_blocks(path_surveydata_basic)]

    def delete_double_workers(path_surveydata, workers):
        """Delete files in *path_surveydata* whose worker was already seen; extends *workers* in place."""
        double_workers = []
        for obj in data.get_eval_blocks(path_surveydata):
            if obj["worker"] in workers:
                double_workers.append(obj["worker"])
            else:
                workers.append(obj["worker"])
        # Iterate folder and delete all files belonging to duplicate workers.
        for file in os.listdir(path_surveydata):
            if file.endswith(".json"):
                file_path = path_surveydata + "/" + file
                with open(file_path, 'r', encoding='utf-8') as f:
                    d = json.load(f)
                # BUG FIX: os.remove previously ran while the file was still
                # open inside the `with` block, which fails with
                # PermissionError on Windows. The file is now closed first.
                if d[-1].get('worker') in double_workers:
                    os.remove(file_path)

    # Order in which the studies were conducted, so the first entry is kept.
    delete_double_workers(path_surveydata_large, workers)
    delete_double_workers(path_surveydata_small, workers)
    delete_double_workers(path_surveydata_basic_2Elements, workers)
    delete_double_workers(path_surveydata_basic_3Elements, workers)
    delete_double_workers(path_surveydata_basic_no_emphasis, workers)
# Function which bundles the creation of the CSV files and diagrams for the chapter "Incentive Sizes"
def build_csv_heatmaps_diagrams_chapter_incentive_sizes(path_small, path_medium, path_large, positions_bremen, positions_randersacker, max_blurs, max_wrong_elements, export_path):
    """Create every CSV file, heatmap and diagram for the chapter "Incentive Sizes".

    Args:
        path_small, path_medium, path_large: survey-data folders of the three
            incentive-size conditions (small / medium-basic / large).
        positions_bremen, positions_randersacker: element positions per stimulus.
        max_blurs: maximum window-blur events for a block to count as successful.
        max_wrong_elements: maximum wrongly answered elements allowed.
        export_path: directory all results of this chapter are written to.
    """
    # Load survey data from the three different studies (small, medium, large)
    study_data_small = data.get_eval_blocks(path_small)
    study_data_medium = data.get_eval_blocks(path_medium)
    study_data_large = data.get_eval_blocks(path_large)
    # Filter successful blocks for each study
    study_data_small_successful = data.get_successful_blocks(study_data_small, max_blurs, max_wrong_elements)
    study_data_medium_successful = data.get_successful_blocks(study_data_medium, max_blurs, max_wrong_elements)
    study_data_large_successful = data.get_successful_blocks(study_data_large, max_blurs, max_wrong_elements)
    # Build CSV files for general user statistics and user statistics for each study
    data.build_csv_general_user_statistics(study_data_small, max_blurs, max_wrong_elements, "general_user_stats_small", export_path)
    data.build_csv_general_user_statistics(study_data_medium, max_blurs, max_wrong_elements, "general_user_stats_medium", export_path)
    data.build_csv_general_user_statistics(study_data_large, max_blurs, max_wrong_elements, "general_user_stats_large", export_path)
    data.build_csv_user_statistics(study_data_small, max_blurs, max_wrong_elements, "user_stats_small", export_path)
    data.build_csv_user_statistics(study_data_medium, max_blurs, max_wrong_elements, "user_stats_medium", export_path)
    data.build_csv_user_statistics(study_data_large, max_blurs, max_wrong_elements, "user_stats_large", export_path)
    # Filter Bremen/Randersacker blocks and their x-y coordinates per study (successful blocks only)
    bremen_blocks_small = data.get_bremen_blocks(study_data_small_successful)
    rander_blocks_small = data.get_randersacker_blocks(study_data_small_successful)
    xb_s, yb_s = data.filter_x_y_from_blocks(bremen_blocks_small)
    xr_s, yr_s = data.filter_x_y_from_blocks(rander_blocks_small)
    bremen_blocks_medium = data.get_bremen_blocks(study_data_medium_successful)
    rander_blocks_medium = data.get_randersacker_blocks(study_data_medium_successful)
    xb_m, yb_m = data.filter_x_y_from_blocks(bremen_blocks_medium)
    xr_m, yr_m = data.filter_x_y_from_blocks(rander_blocks_medium)
    bremen_blocks_large = data.get_bremen_blocks(study_data_large_successful)
    rander_blocks_large = data.get_randersacker_blocks(study_data_large_successful)
    xb_l, yb_l = data.filter_x_y_from_blocks(bremen_blocks_large)
    xr_l, yr_l = data.filter_x_y_from_blocks(rander_blocks_large)
    # Triple heatmaps (small / medium / large side by side) for both stimuli
    data.plot_triple_heatmap_bremen(xb_s, yb_s, xb_m, yb_m, xb_l, yb_l, export_path, "triple_heatmap_bremen")
    data.plot_triple_heatmap_randersacker(xr_s, yr_s, xr_m, yr_m, xr_l, yr_l, export_path, "triple_heatmap_randersacker")
    # Per-position heatmaps for both stimuli and each size
    data.plot_multiple_heatmaps_bremen(bremen_blocks_small, positions_bremen, export_path, "heatmap_bremen_multiple_small")
    data.plot_multiple_heatmaps_bremen(bremen_blocks_medium, positions_bremen, export_path, "heatmap_bremen_multiple_medium")
    data.plot_multiple_heatmaps_bremen(bremen_blocks_large, positions_bremen, export_path, "heatmap_bremen_multiple_large")
    data.plot_multiple_heatmaps_rander(rander_blocks_small, positions_randersacker, export_path, "heatmap_randersacker_multiple_small")
    data.plot_multiple_heatmaps_rander(rander_blocks_medium, positions_randersacker, export_path, "heatmap_randersacker_multiple_medium")
    data.plot_multiple_heatmaps_rander(rander_blocks_large, positions_randersacker, export_path, "heatmap_randersacker_multiple_large")
    # Element-statistics CSVs (blocks filtered with the tab rule enabled) and
    # the success-rate-by-element-position diagrams built from them.
    small_by_tabrule = data.get_successful_blocks(study_data_small, max_blurs, max_wrong_elements, "true")
    medium_by_tabrule = data.get_successful_blocks(study_data_medium, max_blurs, max_wrong_elements, "true")
    large_by_tabrule = data.get_successful_blocks(study_data_large, max_blurs, max_wrong_elements, "true")
    bremen_blocks_all_small = data.get_bremen_blocks(small_by_tabrule)
    bremen_blocks_all_medium = data.get_bremen_blocks(medium_by_tabrule)
    bremen_blocks_all_large = data.get_bremen_blocks(large_by_tabrule)
    rander_blocks_all_small = data.get_randersacker_blocks(small_by_tabrule)
    rander_blocks_all_medium = data.get_randersacker_blocks(medium_by_tabrule)
    rander_blocks_all_large = data.get_randersacker_blocks(large_by_tabrule)
    elements_stats_b_small = data.create_csv_elements_bremen(bremen_blocks_all_small, positions_bremen, export_path, "bremen_elements_small")
    elements_stats_b_medium = data.create_csv_elements_bremen(bremen_blocks_all_medium, positions_bremen, export_path, "bremen_elements_medium")
    elements_stats_b_large = data.create_csv_elements_bremen(bremen_blocks_all_large, positions_bremen, export_path, "bremen_elements_large")
    element_stats_r_small = data.create_csv_elements_randersacker(rander_blocks_all_small, positions_randersacker, export_path, "randersacker_elements_small")
    element_stats_r_medium = data.create_csv_elements_randersacker(rander_blocks_all_medium, positions_randersacker, export_path, "randersacker_elements_medium")
    element_stats_r_large = data.create_csv_elements_randersacker(rander_blocks_all_large, positions_randersacker, export_path, "randersacker_elements_large")
    data.plot_sucessrate_by_element_pos_size(elements_stats_b_small, elements_stats_b_medium, elements_stats_b_large, export_path, "bremen")
    data.plot_sucessrate_by_element_pos_size(element_stats_r_small, element_stats_r_medium, element_stats_r_large, export_path, "randersacker")
    # QoE ACR rating diagrams for both stimuli
    data.plot_qoe_acr_rating_bar(bremen_blocks_small, bremen_blocks_medium, bremen_blocks_large, "qoe_acr_rating_bremen", export_path)
    data.plot_qoe_acr_rating_bar(rander_blocks_small, rander_blocks_medium, rander_blocks_large, "qoe_acr_rating_randersacker", export_path)
    # Movement statistics CSV.
    # BUG FIX: these four calls previously wrote to the module-level global
    # path_export instead of the export_path parameter, silently ignoring the
    # caller-supplied output folder. They now honour export_path.
    data.build_csv_movement_stats(study_data_small_successful, study_data_medium_successful, study_data_large_successful, "movement_stats", export_path)
    # Success rate by stimuli
    data.build_csv_stimuli_success(study_data_small_successful, "stimuli_success_small", export_path)
    data.build_csv_stimuli_success(study_data_medium_successful, "stimuli_success_medium", export_path)
    data.build_csv_stimuli_success(study_data_large_successful, "stimuli_success_large", export_path)
    # Plot average movement
    data.plot_bar_avg_movement(study_data_small_successful, study_data_medium_successful, study_data_large_successful, "avg-movement", export_path)
# Function which bundles the creation of the CSV files and diagrams for the chapter "Incentive Quantities"
def build_csv_heatmaps_diagrams_chapter_incentive_amount(path_one, path_two, path_three, positions_bremen, positions_randersacker, max_blurs, max_wrong_elements, export_path):
    """Create every CSV file, heatmap and diagram for the chapter "Incentive Quantities".

    Args:
        path_one, path_two, path_three: survey-data folders of the studies with
            one, two and three incentive elements respectively.
        positions_bremen, positions_randersacker: element positions per stimulus.
        max_blurs: maximum window-blur events for a block to count as successful.
        max_wrong_elements: maximum wrongly answered elements allowed.
        export_path: directory all results of this chapter are written to.
    """
    # Load survey data from the three different studies (one, two, three elements)
    study_data_one = data.get_eval_blocks(path_one)
    study_data_two = data.get_eval_blocks_2_elements(path_two)
    study_data_three = data.get_eval_blocks_3_elements(path_three)
    # Filter successful blocks for each study
    study_data_one_successful = data.get_successful_blocks(study_data_one, max_blurs, max_wrong_elements)
    study_data_two_successful = data.get_successful_blocks(study_data_two, max_blurs, max_wrong_elements)
    study_data_three_successful = data.get_successful_blocks(study_data_three, max_blurs, max_wrong_elements)
    # Build CSV files for general user statistics and user statistics for each study
    data.build_csv_general_user_statistics(study_data_one, max_blurs, max_wrong_elements, "general_user_stats_one", export_path)
    data.build_csv_general_user_statistics(study_data_two, max_blurs, max_wrong_elements, "general_user_stats_two", export_path)
    data.build_csv_general_user_statistics(study_data_three, max_blurs, max_wrong_elements, "general_user_stats_three", export_path)
    data.build_csv_user_statistics(study_data_one, max_blurs, max_wrong_elements, "user_stats_one", export_path)
    data.build_csv_user_statistics(study_data_two, max_blurs, max_wrong_elements, "user_stats_two", export_path)
    data.build_csv_user_statistics(study_data_three, max_blurs, max_wrong_elements, "user_stats_three", export_path)
    # Filter Bremen/Randersacker blocks and their x-y coordinates per study (successful blocks only)
    bremen_blocks_one = data.get_bremen_blocks(study_data_one_successful)
    rander_blocks_one = data.get_randersacker_blocks(study_data_one_successful)
    xb_1, yb_1 = data.filter_x_y_from_blocks(bremen_blocks_one)
    xr_1, yr_1 = data.filter_x_y_from_blocks(rander_blocks_one)
    bremen_blocks_two = data.get_bremen_blocks(study_data_two_successful)
    rander_blocks_two = data.get_randersacker_blocks(study_data_two_successful)
    xb_2, yb_2 = data.filter_x_y_from_blocks(bremen_blocks_two)
    xr_2, yr_2 = data.filter_x_y_from_blocks(rander_blocks_two)
    bremen_blocks_three = data.get_bremen_blocks(study_data_three_successful)
    rander_blocks_three = data.get_randersacker_blocks(study_data_three_successful)
    xb_3, yb_3 = data.filter_x_y_from_blocks(bremen_blocks_three)
    xr_3, yr_3 = data.filter_x_y_from_blocks(rander_blocks_three)
    # Triple heatmap for Bremen for each quantity
    data.plot_triple_heatmap_bremen(xb_1, yb_1, xb_2, yb_2, xb_3, yb_3, export_path, "triple_heatmap_bremen_amount", "count")
    # Triple heatmap for Randersacker for each quantity
    data.plot_triple_heatmap_randersacker(xr_1, yr_1, xr_2, yr_2, xr_3, yr_3, export_path, "triple_heatmap_randersacker_amount", "count")
    # QoE ACR rating diagrams for both stimuli
    data.plot_qoe_acr_rating_bar(bremen_blocks_one, bremen_blocks_two, bremen_blocks_three, "qoe_acr_rating_bremen_amount", export_path, "count")
    data.plot_qoe_acr_rating_bar(rander_blocks_one, rander_blocks_two, rander_blocks_three, "qoe_acr_rating_randersacker_amount", export_path, "count")
    # Average movement per quantity and stimulus
    data.plot_bar_avg_movement(study_data_one_successful, study_data_two_successful, study_data_three_successful, "avg-movement", export_path, "count")
    # Movement statistics CSV.
    # BUG FIX: previously wrote to the module-level global path_export_count
    # instead of the export_path parameter; now honours the caller's folder.
    data.build_csv_movement_stats(study_data_one_successful, study_data_two_successful, study_data_three_successful, "movement_stats", export_path, "count")
    # Success rate by position permutation and appearances
    data_two_filtered_tab = data.get_successful_blocks(study_data_two, max_blurs, max_wrong_elements, "true")
    data_three_filtered_tab = data.get_successful_blocks(study_data_three, max_blurs, max_wrong_elements, "true")
    data.build_csv_positions_permutations(data_two_filtered_tab, positions_bremen, positions_randersacker, "positions_permutations_two", export_path)
    data.build_csv_positions_permutations(data_three_filtered_tab, positions_bremen, positions_randersacker, "positions_permutations_three", export_path, 3)
# Function which bundles the creation of the CSV files and diagrams for the chapter "Emphasis and wording influence"
def build_diagrams_no_emphasis_chapter(path_surveydata_no_emphasis, path_basic_survey_data, export_path, max_blurs, max_wrong_elements):
    """Create every CSV file, heatmap and diagram for the chapter "Emphasis and wording influence".

    Args:
        path_surveydata_no_emphasis: survey-data folder of the no-emphasis study.
        path_basic_survey_data: survey-data folder of the basic (emphasised) study.
        export_path: directory all results of this chapter are written to.
        max_blurs: maximum window-blur events for a block to count as successful.
        max_wrong_elements: maximum wrongly answered elements allowed.
    """
    # Load survey data from the emphasis and no-emphasis study
    study_data_no_emphasis = data.get_eval_blocks(path_surveydata_no_emphasis)
    study_data = data.get_eval_blocks(path_basic_survey_data)
    # Filter successful blocks for each study
    study_data_ne_successful = data.get_successful_blocks(study_data_no_emphasis, max_blurs, max_wrong_elements)
    study_data_successful = data.get_successful_blocks(study_data, max_blurs, max_wrong_elements)
    bremen_blocks = data.get_bremen_blocks(study_data_successful)
    bremen_blocks_ne = data.get_bremen_blocks(study_data_ne_successful)
    rander_blocks = data.get_randersacker_blocks(study_data_successful)
    rander_blocks_ne = data.get_randersacker_blocks(study_data_ne_successful)
    xb, yb = data.filter_x_y_from_blocks(bremen_blocks)
    xb_ne, yb_ne = data.filter_x_y_from_blocks(bremen_blocks_ne)
    xr, yr = data.filter_x_y_from_blocks(rander_blocks)
    xr_ne, yr_ne = data.filter_x_y_from_blocks(rander_blocks_ne)
    # Build CSV files for general user statistics and user statistics for the no-emphasis study
    data.build_csv_general_user_statistics(study_data_no_emphasis, max_blurs, max_wrong_elements, "general_user_stats_no_emphasis", export_path)
    data.build_csv_user_statistics(study_data_no_emphasis, max_blurs, max_wrong_elements, "user_stats_no_emphasis", export_path)
    # QoE ACR rating diagrams for both conditions and stimuli
    data.plot_qoe_acr_rating_two_inputs(bremen_blocks, bremen_blocks_ne, "qoe_acr_rating_no_emphasis_bremen", export_path, "emph")
    data.plot_qoe_acr_rating_two_inputs(rander_blocks, rander_blocks_ne, "qoe_acr_rating_no_emphasis_randersacker", export_path, "emph")
    # Movement statistics CSV.
    # BUG FIX: previously wrote to the module-level global
    # path_export_no_emphasis instead of the export_path parameter.
    # NOTE(review): the no-emphasis data is passed twice because the helper
    # expects three datasets and this chapter only has two — confirm intended.
    data.build_csv_movement_stats(study_data_successful, study_data_ne_successful, study_data_ne_successful, "movement_stats", export_path)
    # QoE ACR rating bar charts comparing emphasis vs. no emphasis
    data.plot_qoe_acr_rating_bar_emphasis(bremen_blocks, bremen_blocks_ne, "qoe_acr_rating_no_emphasis_bremen", export_path)
    data.plot_qoe_acr_rating_bar_emphasis(rander_blocks, rander_blocks_ne, "qoe_acr_rating_no_emphasis_randersacker", export_path)
    # Average movement for both conditions and stimuli
    data.plot_bar_avg_emphais(study_data_successful, study_data_ne_successful, "bremen-avg-movement", export_path)
    # Heatmaps for both conditions and stimuli
    data.plot_double_heatmap_bremen(xb_ne, yb_ne, xb, yb, export_path, "emphasis_heatmap_bremen")
    data.plot_double_heatmap_randersacker(xr_ne, yr_ne, xr, yr, export_path, "emphasis_heatmap_randersacker")
# Data cleaning first: keep only each worker's earliest submission across studies.
clean_all_multiple_workers_in_order()

# Chapter "Incentive Sizes": small vs. medium (basic) vs. large incentive elements.
build_csv_heatmaps_diagrams_chapter_incentive_sizes(
    path_surveydata_small, path_surveydata_basic, path_surveydata_large,
    positions_bremen, positions_randersacker,
    max_blurs, max_wrong_elements, path_export)

# Chapter "Incentive Quantities": one vs. two vs. three incentive elements.
build_csv_heatmaps_diagrams_chapter_incentive_amount(
    path_surveydata_basic, path_surveydata_basic_2Elements, path_surveydata_basic_3Elements,
    positions_bremen, positions_randersacker,
    max_blurs, max_wrong_elements, path_export_count)

# Chapter "Emphasis and wording influence": emphasised vs. non-emphasised wording.
build_diagrams_no_emphasis_chapter(
    path_surveydata_basic_no_emphasis, path_surveydata_basic,
    path_export_no_emphasis, max_blurs, max_wrong_elements)

2118
Auswertung/data.py Normal file

File diff suppressed because it is too large Load Diff