Skip to content

refactor: explicit template for image classification #17

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 10 commits into from
Mar 16, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ jobs:
restore-keys: |
${{ steps.get-date.outputs.date }}-${{ runner.os }}-${{ matrix.python-version }}-

- run: pip install --pre -r requirements-dev.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html --progress-bar off
- run: pip install -r requirements.txt --progress-bar off
- run: pip install -r requirements-dev.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html --progress-bar off
- run: python -m torch.utils.collect_env
- run: bash .github/run_test.sh generate
- run: bash .github/run_test.sh unittest
Expand Down
4 changes: 2 additions & 2 deletions app/streamlit_app.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import shutil
from pathlib import Path
from datetime import datetime
from pathlib import Path

import streamlit as st
from codegen import CodeGenerator
Expand Down Expand Up @@ -51,7 +51,7 @@ def render_code(self, fname="", code="", fold=False):

def add_sidebar(self):
def config(template_name):
return import_from_file("template_config", f"./templates/{template_name}/{template_name}_config.py")
return import_from_file("template_config", f"./templates/{template_name}/sidebar.py")

self.sidebar(self.codegen.template_list, config)

Expand Down
15 changes: 2 additions & 13 deletions templates/base/base_config.py → templates/base/sidebar.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,6 @@
import streamlit as st

params = {
"amp_mode": {
"app": ["None", "amp", "apex"],
"test": ["None", "amp", "apex"],
},
"device": {
"app": ["cpu", "cuda", "xla"],
"test": ["cpu", "cuda"],
},
"data_path": {
"app": {"value": "./"},
"test": {"prefix": "tmp", "suffix": ""},
Expand All @@ -20,11 +12,11 @@
"test": {"prefix": "tmp", "suffix": ""},
},
"train_batch_size": {
"app": {"min_value": 1, "value": 1},
"app": {"min_value": 1, "value": 4},
"test": {"min_value": 1, "max_value": 2},
},
"eval_batch_size": {
"app": {"min_value": 1, "value": 1},
"app": {"min_value": 1, "value": 4},
"test": {"min_value": 1, "max_value": 2},
},
"num_workers": {
Expand Down Expand Up @@ -96,9 +88,6 @@ def get_configs() -> dict:
st.info("Common base training configurations. Those in the parenthesis are used in the code.")

# group by streamlit function type
config["amp_mode"] = st.selectbox("AMP mode (amp_mode)", params.amp_mode.app)
config["device"] = st.selectbox("Device to use (device)", params.device.app)

config["data_path"] = st.text_input("Dataset path (data_path)", **params.data_path.app)
config["filepath"] = st.text_input("Logging file path (filepath)", **params.filepath.app)

Expand Down
10 changes: 0 additions & 10 deletions templates/base/utils.py.jinja
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,6 @@ import torch

{% block get_default_parser %}
DEFAULTS = {
"amp_mode": {
"default": "{{ amp_mode }}",
"type": str,
"help": "automatic mixed precision mode to use: `amp` or `apex` ({{ amp_mode }})",
},
"train_batch_size": {
"default": {{ train_batch_size }},
"type": int,
Expand All @@ -23,11 +18,6 @@ DEFAULTS = {
"type": str,
"help": "datasets path ({{ data_path }})",
},
"device": {
"default": "{{ device }}",
"type": torch.device,
"help": "device to use for training / evaluation / testing ({{ device }})",
},
"filepath": {
"default": "{{ filepath }}",
"type": str,
Expand Down
1 change: 0 additions & 1 deletion templates/image_classification/fn.py.jinja

This file was deleted.

30 changes: 0 additions & 30 deletions templates/image_classification/generate_metadata.py

This file was deleted.

19 changes: 0 additions & 19 deletions templates/image_classification/image_classification_config.py

This file was deleted.

143 changes: 126 additions & 17 deletions templates/image_classification/main.py.jinja
Original file line number Diff line number Diff line change
@@ -1,34 +1,137 @@
{% extends "base/main.py.jinja" %}
{% block datasets_and_dataloaders %}
train_dataset, eval_dataset = get_datasets(root=config.data_path)
{% block imports %}
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from typing import Any

import logging

import ignite.distributed as idist
from ignite.engine import create_supervised_evaluator, create_supervised_trainer
from ignite.engine.events import Events
from ignite.utils import setup_logger, manual_seed
from ignite.metrics import Accuracy, Loss

from datasets import get_datasets, get_data_loaders
from utils import log_metrics, get_default_parser, initialize, setup_common_handlers, setup_exp_logging
{% endblock %}


{% block run %}
def run(local_rank: int, config: Any, *args: Any, **kwargs: Any):

# -----------------------------
# datasets and dataloaders
# -----------------------------
{% block datasets_and_dataloaders %}
train_dataset, eval_dataset = get_datasets(config.data_path)
train_dataloader, eval_dataloader = get_data_loaders(
train_dataset=train_dataset,
eval_dataset=eval_dataset,
train_batch_size=config.train_batch_size,
eval_batch_size=config.eval_batch_size,
num_workers=config.num_workers,
)
{% endblock %}
{% endblock %}

{% block model_optimizer_loss %}
model = idist.auto_model(get_model(config.model_name))
optimizer = idist.auto_optim(optim.Adam(model.parameters(), lr=config.lr))
loss_fn = nn.CrossEntropyLoss()
{% endblock %}
# ------------------------------------------
# model, optimizer, loss function, device
# ------------------------------------------
{% block model_optimizer_loss %}
device, model, optimizer, loss_fn = initialize(config)
{% endblock %}

# ----------------------
# train / eval engine
# ----------------------
{% block engines %}
train_engine = create_supervised_trainer(
model=model,
optimizer=optimizer,
loss_fn=loss_fn,
device=device,
output_transform=lambda x, y, y_pred, loss: {'train_loss': loss.item()},
)
metrics = {
'eval_accuracy': Accuracy(device=device),
'eval_loss': Loss(loss_fn=loss_fn, device=device)
}
eval_engine = create_supervised_evaluator(
model=model,
metrics=metrics,
device=device,
)
{% endblock %}

{% block metrics %}
Accuracy(device=config.device).attach(eval_engine, "eval_accuracy")
# ---------------
# setup logging
# ---------------
{% block loggers %}
name = f"bs{config.train_batch_size}-lr{config.lr}-{optimizer.__class__.__name__}"
now = datetime.now().strftime("%Y%m%d-%X")
train_engine.logger = setup_logger("trainer", level=config.verbose, filepath=config.filepath / f"{name}-{now}.log")
eval_engine.logger = setup_logger("evaluator", level=config.verbose, filepath=config.filepath / f"{name}-{now}.log")
{% endblock %}

# -----------------------------------------
# checkpoint and common training handlers
# -----------------------------------------
{% block eval_ckpt_common_training %}
eval_ckpt_handler = setup_common_handlers(
config=config,
eval_engine=eval_engine,
train_engine=train_engine,
model=model,
optimizer=optimizer
)
{% endblock %}

# --------------------------------
# setup common experiment loggers
# --------------------------------
{% block exp_loggers %}
exp_logger = setup_exp_logging(
config=config,
eval_engine=eval_engine,
train_engine=train_engine,
optimizer=optimizer,
name=name
)
{% endblock %}

# ----------------------
# engines log and run
# ----------------------
{% block engines_run_and_log %}
{% block log_training_results %}
@train_engine.on(Events.ITERATION_COMPLETED(every=config.log_train))
def log_training_results(engine):
train_engine.state.metrics = train_engine.state.output
log_metrics(train_engine, "Train", device)
{% endblock %}

{% block run_eval_engine_and_log %}
@train_engine.on(Events.EPOCH_COMPLETED(every=config.log_eval))
def run_eval_engine_and_log(engine):
eval_engine.run(
eval_dataloader,
max_epochs=config.eval_max_epochs,
epoch_length=config.eval_epoch_length
)
log_metrics(eval_engine, "Eval", device)
{% endblock %}

train_engine.run(
train_dataloader,
max_epochs=config.train_max_epochs,
epoch_length=config.train_epoch_length
)
{% endblock %}
{% endblock %}

{% block main_fn %}
def main():
parser = ArgumentParser(parents=[get_default_parser()])
parser.add_argument(
"--model_name",
default="{{ model_name }}",
type=str,
help="Image classification model name ({{ model_name }})"
)
config = parser.parse_args()
manual_seed(config.seed)
config.verbose = logging.INFO if config.verbose else logging.WARNING
Expand All @@ -46,3 +149,9 @@ def main():
) as parallel:
parallel.run(run, config=config)
{% endblock %}


{% block entrypoint %}
if __name__ == "__main__":
main()
{% endblock %}
51 changes: 0 additions & 51 deletions templates/image_classification/metadata.json

This file was deleted.

Loading