"""
Module: PCA (Principal Component Analysis)
Category: Unsupervised
Difficulty: Intermediate

Generated from the ML Formation platform
"""

# Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the dataset
df = pd.read_csv('high_dimensional.csv')

# Explore the high-dimensional data
# Type: Executable code
print("=" * 70)
print("    EXPLORATION DES DONNEES HAUTE DIMENSION")
print("=" * 70)

print("\n" + "=" * 70)
print("1. APERCU DU DATASET")
print("=" * 70)
print("""
This dataset illustrates the problem of high dimensionality.
With 10 features, the data cannot be visualized directly!

PCA will let us reduce it to 2D while preserving
the essential information.
""")
display(df.head(10), title="High-Dimensional Dataset")

print(f"\n  Dimensions du dataset:")
print(f"    • {df.shape[0]} echantillons")
print(f"    • {df.shape[1]} colonnes (dont 1 categorie)")
print(f"    • {df.shape[1] - 1} features numeriques")

print("\n" + "=" * 70)
print("2. DISTRIBUTION DES CATEGORIES")
print("=" * 70)
cat_counts = df['category'].value_counts()
print("\nRepartition des categories:")
print("-" * 40)
for cat, count in cat_counts.items():
    pct = count / len(df) * 100
    bar = "█" * int(pct / 3)
    print(f"  Categorie {cat}: {count:3d} ({pct:.0f}%)  {bar}")

print(f"""
Our goal:
  → Reduce from 10 dimensions down to 2
  → Check whether the categories remain separable
""")

print("\n" + "=" * 70)
print("3. MATRICE DE CORRELATIONS")
print("=" * 70)
print("""
Correlations between features matter for PCA.
Highly correlated features carry redundant information
that PCA will "compress" into a single component
(see the quick demo below).
""")

numeric_cols = [c for c in df.columns if c != 'category']
corr = df[numeric_cols].corr()
display(corr.round(2), title="Feature correlations")

# Analyze the correlations
print("\n" + "-" * 40)
print("ANALYSE DES CORRELATIONS:")
print("-" * 40)

strong_corr = []
for i in range(len(numeric_cols)):
    for j in range(i+1, len(numeric_cols)):
        c = abs(corr.iloc[i, j])
        if c > 0.5:
            strong_corr.append((numeric_cols[i], numeric_cols[j], corr.iloc[i, j]))

if strong_corr:
    print("\n  Correlations fortes detectees (|r| > 0.5):")
    for f1, f2, c in sorted(strong_corr, key=lambda x: abs(x[2]), reverse=True)[:5]:
        print(f"    • {f1} <-> {f2}: {c:+.2f}")
    print("\n  → Ces correlations seront capturees par la PCA!")
else:
    print("\n  Pas de correlations fortes detectees.")
    print("  → Chaque feature apporte de l'information unique.")

print("\n" + "=" * 70)
print("RESUME")
print("=" * 70)
print(f"""
We have {len(numeric_cols)} features - impossible to visualize all at once!

PCA will:
  1. Find the directions of maximum variance
  2. Project the data onto those directions
  3. Let us visualize everything in 2D
""")


# Visualize the high-dimensionality problem
# Type: Executable code
print("=" * 70)
print("    LE PROBLEME: VISUALISER 10 DIMENSIONS")
print("=" * 70)
print("""
With 10 features, we can only visualize PAIRS of features.
That makes 10×9/2 = 45 possible scatter plots
(the quick check below confirms the count)!

Let's look at a few pairs to see the problem.
""")

fig, axes = plt.subplots(2, 2, figsize=(12, 10))
colors = {'A': '#9B7AC4', 'B': '#C09CF0', 'C': '#F7E64D'}

# Plot four feature pairs, one per subplot
feature_pairs = [('feature_1', 'feature_2'), ('feature_3', 'feature_4'),
                 ('feature_5', 'feature_6'), ('feature_7', 'feature_8')]
for ax, (fx, fy) in zip(axes.ravel(), feature_pairs):
    for cat in df['category'].unique():
        mask = df['category'] == cat
        ax.scatter(df[mask][fx], df[mask][fy],
                   c=colors[cat], label=f'Cat {cat}', alpha=0.7, s=50)
    ax.set_xlabel(fx.replace('_', ' ').title())
    ax.set_ylabel(fy.replace('_', ' ').title())
    ax.set_title(f"{fx.replace('_', ' ').title()} vs {fy.replace('_', ' ').title()}")
    ax.legend()
    ax.grid(True, alpha=0.3)

plt.tight_layout()
plt.show()

print("\n" + "=" * 70)
print("LE PROBLEME")
print("=" * 70)
print("""
What we observe:
  • Each plot shows only 2 of the 10 features
  • The categories often overlap
  • The global structure of the data stays hidden
  • There is no practical way to find the "best" view by hand

THE SOLUTION - PCA:
  → Automatically find the 2 best directions
  → Combine the 10 features into 2 optimal components
  → Reveal more of the structure, often improving the visible separation
""")


# Apply PCA
# Type: Executable code
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

print("=" * 70)
print("         APPLICATION DE LA PCA")
print("=" * 70)

# Prepare the data (exclude the category column)
feature_cols = [c for c in df.columns if c != 'category']
X = df[feature_cols].values
y = df['category'].values

print("\n" + "=" * 70)
print("1. PREPARATION DES DONNEES")
print("=" * 70)
print(f"""
Original data:
  • {X.shape[0]} samples
  • {X.shape[1]} features
""")

print("\n" + "=" * 70)
print("2. NORMALISATION (ETAPE CRITIQUE!)")
print("=" * 70)
print("""
IMPORTANT: PCA is sensitive to feature scales!

Without standardization, features with large values
would dominate the variance and therefore the principal
components (the quick check below illustrates this).
""")

scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

print("  Comparaison avant/apres normalisation:")
print("  " + "-" * 50)
print(f"  {'Feature':<12} | {'Avant (moy±std)':<20} | {'Apres (moy±std)':<20}")
print("  " + "-" * 50)
for i, name in enumerate(feature_cols[:4]):  # Show the first 4 features
    before = f"{X[:, i].mean():.2f} ± {X[:, i].std():.2f}"
    after = f"{X_scaled[:, i].mean():.2f} ± {X_scaled[:, i].std():.2f}"
    print(f"  {name:<12} | {before:<20} | {after:<20}")
print(f"  {'...':<12} | {'...':<20} | {'...':<20}")

print("""
Result:
  → Mean = 0, standard deviation = 1 for every feature
  → All features now contribute on an equal footing
""")

print("\n" + "=" * 70)
print("3. APPLICATION DE LA PCA")
print("=" * 70)

# Apply PCA (reduce to 2 dimensions)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)

print(f"""
Transformation complete:
  • Dimensions BEFORE: {X.shape[1]}
  • Dimensions AFTER: {X_pca.shape[1]}
  • Compression: {(1 - X_pca.shape[1]/X.shape[1])*100:.0f}%
""")

print("\n" + "=" * 70)
print("4. VARIANCE EXPLIQUEE")
print("=" * 70)
print("""
The explained variance tells us HOW MUCH information
each principal component preserves (the ratios are
recomputed by hand a few lines below).
""")

print(f"\n  Variance par composante:")
print("  " + "-" * 40)
total_var = 0
for i, var in enumerate(pca.explained_variance_ratio_):
    total_var += var
    bar = "█" * int(var * 50)
    print(f"    PC{i+1}: {var:6.2%}  {bar}")

print(f"\n  Variance TOTALE preservee: {total_var:.2%}")

# Interpretation
if total_var > 0.90:
    interpretation = "Excellent! Plus de 90% de l'info preservee."
elif total_var > 0.80:
    interpretation = "Bon! Entre 80-90% de l'info preservee."
elif total_var > 0.70:
    interpretation = "Acceptable. 70-80% preservee."
else:
    interpretation = "Attention: moins de 70% preservee."

print(f"  Interpretation: {interpretation}")

print("\n" + "=" * 70)
print("RESUME")
print("=" * 70)
print(f"""
We reduced {X.shape[1]} dimensions down to {X_pca.shape[1]}
while preserving {total_var:.1%} of the information!

The new coordinates (PC1, PC2) are LINEAR COMBINATIONS
of the original features (verified by hand just below).
""")


# Visualize the reduced data
# Type: Executable code
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

print("=" * 70)
print("    VISUALISATION DES DONNEES APRES PCA")
print("=" * 70)
print("""
The power of PCA: visualizing 10 dimensions in 2D!
""")

# Preparation
feature_cols = [c for c in df.columns if c != 'category']
X = df[feature_cols].values
y = df['category'].values
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# PCA
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)

# Visualization
fig, axes = plt.subplots(1, 2, figsize=(14, 5))

colors = {'A': '#9B7AC4', 'B': '#C09CF0', 'C': '#F7E64D'}

# Plot 1: PCA-projected data
for cat in np.unique(y):
    mask = y == cat
    axes[0].scatter(X_pca[mask, 0], X_pca[mask, 1],
               c=colors[cat], label=f'Category {cat}', alpha=0.7, s=60, edgecolors='black')

axes[0].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.1%} variance)')
axes[0].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.1%} variance)')
axes[0].set_title('Data projected onto PC1 and PC2')
axes[0].legend()
axes[0].grid(True, alpha=0.3)
axes[0].axhline(y=0, color='black', linewidth=0.5)
axes[0].axvline(x=0, color='black', linewidth=0.5)

# Plot 2: Comparison with the original features
for cat in np.unique(y):
    mask = y == cat
    axes[1].scatter(df[mask]['feature_1'], df[mask]['feature_2'],
               c=colors[cat], label=f'Category {cat}', alpha=0.7, s=60, edgecolors='black')

axes[1].set_xlabel('Feature 1')
axes[1].set_ylabel('Feature 2')
axes[1].set_title('Original features (2 of 10)')
axes[1].legend()
axes[1].grid(True, alpha=0.3)

plt.tight_layout()
plt.show()

print("\n" + "=" * 70)
print("INTERPRETATION")
print("=" * 70)
print(f"""
LEFT PLOT (PCA):
  → The {len(np.unique(y))} categories are now clearly visible!
  → PC1 captures the direction of greatest variance
  → PC2 captures the second, orthogonal direction
    (orthogonality checked just below)

RIGHT PLOT (Original):
  → Only 2 features out of {X.shape[1]}
  → The separation is less clear

WHY PCA WORKS:
  → It combines ALL the features in an optimal way
  → It finds the projection that preserves the most variance
  → Categories that are well separated in high dimension usually remain separated in 2D
""")

# Statistiques par categorie dans l'espace PCA
print("\n" + "=" * 70)
print("POSITIONS DES CATEGORIES DANS L'ESPACE PCA")
print("=" * 70)
print(f"\n{'Categorie':<12} | {'Centre PC1':^12} | {'Centre PC2':^12}")
print("-" * 42)
for cat in np.unique(y):
    mask = y == cat
    pc1_mean = X_pca[mask, 0].mean()
    pc2_mean = X_pca[mask, 1].mean()
    print(f"{cat:<12} | {pc1_mean:^12.2f} | {pc2_mean:^12.2f}")


# Choose the number of components
# Type: Executable code
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

print("=" * 70)
print("    CHOIX DU NOMBRE DE COMPOSANTES")
print("=" * 70)
print("""
How do we decide how many components to keep?

Common rules of thumb:
  • Keep 90-95% of the total variance
  • Look for the "elbow" in the scree plot
  • Trade off compression against information loss
""")

# Preparation
feature_cols = [c for c in df.columns if c != 'category']
X = df[feature_cols].values
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# PCA with all components
pca_full = PCA()
pca_full.fit(X_scaled)

# Cumulative explained variance
cumsum = np.cumsum(pca_full.explained_variance_ratio_)

print("\n" + "=" * 70)
print("VARIANCE EXPLIQUEE PAR COMPOSANTE")
print("=" * 70)
print(f"\n{'PC':^5} | {'Variance':^10} | {'Cumulee':^10} | {'Graphique':<30}")
print("-" * 60)

for i, (var, cum) in enumerate(zip(pca_full.explained_variance_ratio_, cumsum)):
    bar = "█" * int(var * 40)
    # Flag the first component at which each cumulative threshold is reached
    marker = ""
    if cum >= 0.90 and (i == 0 or cumsum[i-1] < 0.90):
        marker += " ← 90%"
    if cum >= 0.95 and (i == 0 or cumsum[i-1] < 0.95):
        marker += " ← 95%"
    print(f"PC{i+1:^3} | {var:^10.2%} | {cum:^10.2%} | {bar}{marker}")

print("-" * 60)

# Visualization
fig, axes = plt.subplots(1, 2, figsize=(14, 5))

# Variance per component (scree plot)
x_pos = range(1, len(pca_full.explained_variance_ratio_) + 1)
bars = axes[0].bar(x_pos, pca_full.explained_variance_ratio_, color='#9B7AC4', edgecolor='black')
axes[0].set_xlabel('Principal Component')
axes[0].set_ylabel('Explained Variance')
axes[0].set_title('Scree Plot - Variance per Component')
axes[0].set_xticks(x_pos)
axes[0].grid(True, alpha=0.3, axis='y')

# Annotate the first three bars
for i in range(min(3, len(bars))):
    axes[0].text(bars[i].get_x() + bars[i].get_width()/2, bars[i].get_height() + 0.01,
                f'{pca_full.explained_variance_ratio_[i]:.1%}', ha='center', fontsize=9)

# Cumulative variance
axes[1].plot(x_pos, cumsum, marker='o', color='#9B7AC4', linewidth=2, markersize=8)
axes[1].axhline(y=0.95, color='#F7E64D', linestyle='--', linewidth=2, label='95% variance')
axes[1].axhline(y=0.90, color='#C09CF0', linestyle='--', linewidth=2, label='90% variance')
axes[1].fill_between(x_pos, cumsum, alpha=0.3, color='#9B7AC4')
axes[1].set_xlabel('Number of Components')
axes[1].set_ylabel('Cumulative Explained Variance')
axes[1].set_title('Cumulative Variance')
axes[1].set_xticks(x_pos)
axes[1].legend()
axes[1].grid(True, alpha=0.3)
axes[1].set_ylim(0, 1.05)

plt.tight_layout()
plt.show()

# Find the variance thresholds
n_95 = np.argmax(cumsum >= 0.95) + 1
n_90 = np.argmax(cumsum >= 0.90) + 1

print("\n" + "=" * 70)
print("RECOMMANDATIONS")
print("=" * 70)
print(f"""
Recommended number of components:
  • For 90% of the variance: {n_90} components (out of {X.shape[1]})
  • For 95% of the variance: {n_95} components (out of {X.shape[1]})

Possible compression:
  • 90% variance: {X.shape[1]} → {n_90} dimensions ({(1-n_90/X.shape[1])*100:.0f}% compression)
  • 95% variance: {X.shape[1]} → {n_95} dimensions ({(1-n_95/X.shape[1])*100:.0f}% compression)

For visualization: 2 components (PC1 + PC2)
  → Variance preserved: {cumsum[1]:.1%}
""")


# Interpret the components
# Type: Executable code
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

print("=" * 70)
print("    INTERPRETATION DES COMPOSANTES PRINCIPALES")
print("=" * 70)
print("""
The principal components are LINEAR COMBINATIONS
of the original features. The "loadings" tell us how much
each feature contributes to each component.
""")

# Preparation
feature_cols = [c for c in df.columns if c != 'category']
X = df[feature_cols].values
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# PCA
pca = PCA(n_components=2)
pca.fit(X_scaled)

print("\n" + "=" * 70)
print("1. LOADINGS (POIDS DES FEATURES)")
print("=" * 70)
print("""
A loading indicates how strongly, and in which direction, a feature is tied
to a component (rescaled, it approximates the feature-to-component
correlation - see the sketch below).
- Strong positive loading: the feature pushes the PC up
- Strong negative loading: the feature pushes the PC down
- Loading close to 0: the feature matters little for that PC
""")

# Loadings (contribution of each original feature)
loadings = pd.DataFrame(
    pca.components_.T,
    columns=['PC1', 'PC2'],
    index=feature_cols
)

print("\n  Tableau des loadings:")
display(loadings.round(3), title="Loadings des Composantes")

print("\n" + "=" * 70)
print("2. INTERPRETATION DES LOADINGS")
print("=" * 70)

for pc_idx, pc_name in enumerate(['PC1', 'PC2']):
    pc_loadings = loadings[pc_name].abs().sort_values(ascending=False)
    print(f"\n  {pc_name} ({pca.explained_variance_ratio_[pc_idx]:.1%} variance):")
    print("  " + "-" * 40)
    print("  Features les plus importantes:")
    for i, (feat, load) in enumerate(pc_loadings.head(3).items()):
        sign = "+" if loadings.loc[feat, pc_name] > 0 else "-"
        print(f"    {i+1}. {feat}: {sign}{load:.3f}")

print("\n" + "=" * 70)
print("3. VISUALISATION DES LOADINGS")
print("=" * 70)

fig, axes = plt.subplots(1, 2, figsize=(14, 6))

# PC1
colors_pc1 = ['#27ae60' if l > 0 else '#e74c3c' for l in loadings['PC1']]
axes[0].barh(feature_cols, loadings['PC1'], color=colors_pc1, edgecolor='black')
axes[0].set_xlabel('Loading')
axes[0].set_title(f'Feature contributions to PC1 ({pca.explained_variance_ratio_[0]:.1%})')
axes[0].axvline(x=0, color='black', linewidth=1)
axes[0].grid(True, alpha=0.3, axis='x')

# PC2
colors_pc2 = ['#27ae60' if l > 0 else '#e74c3c' for l in loadings['PC2']]
axes[1].barh(feature_cols, loadings['PC2'], color=colors_pc2, edgecolor='black')
axes[1].set_xlabel('Loading')
axes[1].set_title(f'Feature contributions to PC2 ({pca.explained_variance_ratio_[1]:.1%})')
axes[1].axvline(x=0, color='black', linewidth=1)
axes[1].grid(True, alpha=0.3, axis='x')

plt.tight_layout()
plt.show()

print("""
LEGEND:
  • Green bars: POSITIVE contribution (the PC increases when the feature increases)
  • Red bars: NEGATIVE contribution (the PC decreases when the feature increases)
  • Bar length: strength of the contribution
""")

print("\n" + "=" * 70)
print("4. BIPLOT (LOADINGS + DONNEES)")
print("=" * 70)

# Build a biplot
X_pca = pca.transform(X_scaled)
y = df['category'].values

plt.figure(figsize=(10, 8))

# Projected data
colors = {'A': '#9B7AC4', 'B': '#C09CF0', 'C': '#F7E64D'}
for cat in np.unique(y):
    mask = y == cat
    plt.scatter(X_pca[mask, 0], X_pca[mask, 1],
               c=colors[cat], label=f'Cat {cat}', alpha=0.5, s=50)

# Add the loading vectors (scaled up for visibility)
scale = 3
for i, feat in enumerate(feature_cols):
    plt.arrow(0, 0, loadings.iloc[i, 0]*scale, loadings.iloc[i, 1]*scale,
             head_width=0.1, head_length=0.05, fc='red', ec='red', alpha=0.7)
    plt.text(loadings.iloc[i, 0]*scale*1.1, loadings.iloc[i, 1]*scale*1.1,
            feat, fontsize=8, color='red')

plt.xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.1%})')
plt.ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.1%})')
plt.title('Biplot: Data + Loadings')
plt.legend()
plt.grid(True, alpha=0.3)
plt.axhline(y=0, color='black', linewidth=0.5)
plt.axvline(x=0, color='black', linewidth=0.5)
plt.show()

print("""
READING THE BIPLOT:
  • Colored points: samples projected onto PC1/PC2
  • Red arrows: direction and strength of each feature
  • Parallel arrows: correlated features
  • Opposite arrows: anti-correlated features
""")


# Exercise: PCA and classification
# Type: Exercise
# Exercise: compare classification before and after PCA

from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

# Prepare the data
feature_cols = [c for c in df.columns if c != 'category']
X = df[feature_cols].values
y = df['category'].values
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# TODO: Split into train/test sets
# TODO: Train a KNN classifier on the original data (10 features)
# TODO: Apply PCA (2 components) and train a KNN classifier on it
# TODO: Compare the accuracies
# (A possible solution sketch follows below for reference.)
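
# --- One possible solution sketch (for reference; your answer may differ).
# --- Assumptions: KNN with k=5, a 70/30 stratified split, and PCA fitted on
# --- the training set only to avoid leakage.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Single split reused for both models so the comparison is fair
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.3, random_state=42, stratify=y)

# KNN on the original 10 standardized features
knn_full = KNeighborsClassifier(n_neighbors=5)
knn_full.fit(X_train, y_train)
acc_full = accuracy_score(y_test, knn_full.predict(X_test))

# KNN on 2 principal components
pca_2 = PCA(n_components=2)
X_train_pca = pca_2.fit_transform(X_train)
X_test_pca = pca_2.transform(X_test)
knn_pca = KNeighborsClassifier(n_neighbors=5)
knn_pca.fit(X_train_pca, y_train)
acc_pca = accuracy_score(y_test, knn_pca.predict(X_test_pca))

print(f"Accuracy with 10 features:            {acc_full:.3f}")
print(f"Accuracy with 2 principal components: {acc_pca:.3f}")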

