|
4 | 4 | import matplotlib.pyplot as plt
|
5 | 5 | import numpy as np
|
6 | 6 | import pandas as pd
|
| 7 | +import seaborn as sn |
7 | 8 | import torch
|
8 | 9 | import torch.nn as nn
|
9 | 10 | import torchmetrics
|
|
18 | 19 | from torch.utils.data import DataLoader
|
19 | 20 | from torchvision.datasets import CIFAR10
|
20 | 21 |
|
| 22 | +sn.set() |
| 23 | + |
21 | 24 | # %% [markdown]
|
22 | 25 | # ## Define Data Augmentations module
|
23 | 26 | #
|
@@ -100,11 +103,8 @@ def __init__(self):
|
100 | 103 | super().__init__()
|
101 | 104 | # not the best model: experiment yourself
|
102 | 105 | self.model = torchvision.models.resnet18(pretrained=True)
|
103 |
| - |
104 | 106 | self.preprocess = Preprocess() # per sample transforms
|
105 |
| - |
106 | 107 | self.transform = DataAugmentation() # per-batch augmentation (kornia)
|
107 |
| - |
108 | 108 | self.train_accuracy = torchmetrics.Accuracy()
|
109 | 109 | self.val_accuracy = torchmetrics.Accuracy()
|
110 | 110 |
|
@@ -201,18 +201,12 @@ def val_dataloader(self):
|
201 | 201 |
|
202 | 202 | # %%
|
203 | 203 | metrics = pd.read_csv(f"{trainer.logger.log_dir}/metrics.csv")
|
204 |
| -print(metrics.head()) |
205 |
| - |
206 |
| -aggreg_metrics = [] |
207 |
| -agg_col = "epoch" |
208 |
| -for i, dfg in metrics.groupby(agg_col): |
209 |
| -agg = dict(dfg.mean()) |
210 |
| -agg[agg_col] = i |
211 |
| -aggreg_metrics.append(agg) |
212 |
| - |
213 |
| -df_metrics = pd.DataFrame(aggreg_metrics) |
214 |
| -df_metrics[["train_loss", "valid_loss"]].plot(grid=True, legend=True) |
215 |
| -df_metrics[["valid_acc", "train_acc"]].plot(grid=True, legend=True) |
| 204 | +del metrics["step"] |
| 205 | +metrics.set_index("epoch", inplace=True) |
| 206 | +print(metrics.dropna(axis=1, how="all").head()) |
| 207 | +g = sn.relplot(data=metrics, kind="line") |
| 208 | +plt.gcf().set_size_inches(12, 4) |
| 209 | +plt.grid() |
216 | 210 |
|
217 | 211 | # %% [markdown]
|
218 | 212 | # ## Tensorboard
|
|
0 commit comments