Reproducible research practices in magnetic resonance neuroimaging
A review informed by advanced language models
import numpy as np
import pandas as pd
import plotly.express as px
from scipy.stats import linregress
import math
import plotly.io as pio
pio.renderers.default = "plotly_mimetype"
# Seed for reproducibility
np.random.seed(42)
# Number of datapoints for each scatter plot
n_points = 400
# Function to generate synthetic data for brain volume vs. cognitive performance
def generate_synthetic_data(volume_mean, volume_std, score_mean, score_std, correlation, n_points):
    # Draw region volumes from a normal distribution
    volume = np.random.normal(volume_mean, volume_std, n_points)
    # Jitter the volumes to spread the scatter along the x-axis
    volume_jitter = np.random.normal(0, volume_std * 1.5, n_points)
    # Noise term so that the score is only partially explained by volume
    noise = np.random.normal(0, score_std * 3.5, n_points)
    score = score_mean + correlation * (volume - volume_mean) + noise
    return volume + volume_jitter, score
# Function to generate additional metrics
def generate_additional_metrics(volume, score, volume_std, score_std, n_points):
    # Hypothetical brain density: volume scaled by a randomly perturbed standard deviation
    density = volume / (volume_std * np.random.uniform(0.8, 1.2, n_points))
    # Hypothetical neural efficiency: a nonlinear (sinusoidal) transform of the score with multiplicative noise
    efficiency = np.sin(score) / (score_std * np.random.uniform(1.2, 11.7, n_points))
    return density, efficiency
# Generate synthetic data for the scatter plots
data_dict = {}
# Scatter plot 8: Parietal Cortex Volume vs. Spatial Orientation
data_dict["Parietal_Spatial"] = generate_synthetic_data(8500, 100, 68, 8, 0.77, n_points)
df_dict = {key: pd.DataFrame({'Volume': value[0], 'Score': value[1]}) for key, value in data_dict.items()}
# Apply additional metrics to all regions
for key, df in df_dict.items():
    volume_std = np.std(df['Volume'])
    score_std = np.std(df['Score'])
    # Generate new metrics for Brain Density and Neural Efficiency
    density, efficiency = generate_additional_metrics(df['Volume'], df['Score'], volume_std, score_std, n_points)
    # Add the new metrics to the DataFrame
    df['Density'] = density
    df['Efficiency'] = efficiency
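As a quick sanity check on the synthetic data (a minimal sketch, not part of the original analysis; the variable names below are illustrative), the realized association between Volume and Score can be compared with the nominal correlation passed to generate_synthetic_data:
from scipy.stats import pearsonr, spearmanr
# Sanity check (illustrative only): how strongly are the jittered volumes and the scores actually related?
check_df = df_dict["Parietal_Spatial"]
r_pearson, _ = pearsonr(check_df['Volume'], check_df['Score'])
rho_spearman, _ = spearmanr(check_df['Volume'], check_df['Score'])
print(f"Pearson r = {r_pearson:.2f}, Spearman rho = {rho_spearman:.2f}")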
def plot_correlation(df_dict, key, x_col, y_col, title_prefix, color='blue', marker_size=10, marker_opacity=0.7):
    df = df_dict[key]
    # Fit an ordinary least-squares line to report the intercept (shift) and slope (scale)
    slope, intercept, _, _, _ = linregress(df[x_col], df[y_col])
    shift = intercept
    scale = slope
    # Create scatter plot with an OLS trendline and customized marker color
    fig = px.scatter(
        df,
        x=x_col,
        y=y_col,
        title=f'{title_prefix} (Shift: {shift:.2f}, Scale: {scale:.2f})',
        trendline='ols',
        color_discrete_sequence=[color],  # Custom marker color
        template="simple_white",
    )
    # Update marker style
    fig.update_traces(marker=dict(size=marker_size, opacity=marker_opacity))
    # Update figure layout
    fig.update_layout(height=600)
    return fig
alinx = [-0.7000000000000002,
-0.6000000000000003,
-0.5000000000000004,
-0.3000000000000005,
-0.10000000000000053,
0.09999999999999948]
aliny = [-0.5196152422706631,
-0.6928203230275507,
-0.8660254037844384,
-0.8660254037844385,
-0.8660254037844386,
-0.8660254037844388]
fig = px.scatter(x=alinx, y=aliny, title='Alienarity index',
                 labels={'x': 'X-axis', 'y': 'Y-axis'}, template="simple_white")
fig.update_traces(marker=dict(size=10, opacity=1, color='indigo'))
fig.show()
fig1 = plot_correlation(df_dict, "Parietal_Spatial", 'Volume', 'Score', 'Parietal', color='indigo', marker_size=10, marker_opacity=0.7)
fig1.show()
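If the interactive figure should be archived alongside the text, Plotly can write it to standalone HTML so it can be reopened without re-running the notebook (a minimal sketch; the file name is illustrative, not from the original workflow):
# Illustrative export (file name is an assumption, not part of the original analysis)
fig1.write_html("parietal_volume_vs_score.html", include_plotlyjs="cdn")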
fig2 = plot_correlation(df_dict, "Parietal_Spatial", 'Density', 'Score', 'Parietal', color='hotpink', marker_size=10, marker_opacity=0.7)
fig2.show()
fig2 = plot_correlation(df_dict, "Parietal_Spatial", 'Efficiency', 'Score', 'Parietal', color="indianred", marker_size=10, marker_opacity=0.7)
fig2.show()