Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ swanlog.bak/
dist/

# test
test/temp
temp/
tutils/config.json
tutils/package.mock.json

Expand All @@ -26,3 +26,13 @@ playground/
# python
__pycache__
.pytest_cache/

# test-integration-ultralytics
yolov8n-cls.pt
yolov8n.pt
yolov8n-pose.pt
yolov8n-seg.pt
runs/

# test-integration-lightning
LightningTest/
1 change: 1 addition & 0 deletions test/integration/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
temp/
19 changes: 19 additions & 0 deletions test/integration/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Integration

集成测试,并不会纳入单元测试,但是依旧需要保留在项目中,以保证可复现性。

> 不纳入单元测试的原因是集成测试需要使用一些外部资源,如数据集等,这些资源不适合在单元测试中使用。

有几个要点需要注意:

1. 通过框架名称区分不同的集成测试,如`fastai`等,如果框架名称和包同名,也应该取一个类似的名称来区分。
2. 所有的框架测试在云端版上均使用开发环境
3. 测试的代码尽量简单,如果存在数据集,需附上数据集的下载链接并提供下载脚本
4. 测试代码应该使用框架自带的经典网络,不应该使用自定义的网络
5. 如果网络包含预训练模型,应该提供预训练模型的下载链接,如果网络本身需要下载,也应该提供下载链接
6. 每一个测试需包含requirements.txt文件,以便于安装所需依赖
7. 测试代码应该包含使用说明,例如README.md或者在主文件中使用头部注释——具体取决于测试的复杂程度

---

所有数据集、预训练模型等资源放在集成内的`temp`目录下,此目录不纳入版本控制。
28 changes: 28 additions & 0 deletions test/integration/fastai/fastai_train.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
from tutils import open_dev_mode
import swanlab

# Log in to the dev-mode cloud backend before importing the integration.
swanlab.login(open_dev_mode())

from fastai.vision.all import *
from swanlab.integration.fastai import SwanLabCallback


# Load the Oxford-IIIT Pets dataset; labels are parsed from the file names.
path = untar_data(URLs.PETS)
dls = ImageDataLoaders.from_name_re(
    # The dot before "jpg" is escaped so it matches a literal period
    # rather than any character.
    path, get_image_files(path / "images"), pat=r"([^/]+)_\d+\.jpg$", item_tfms=Resize(224)
)

# Build a classic ImageNet-pretrained ResNet-34 classifier.
learn = vision_learner(dls, resnet34, metrics=error_rate)

# Attach SwanLabCallback so training metrics are streamed to SwanLab.
learn.fit_one_cycle(
    5,
    cbs=SwanLabCallback(
        project="fastai-swanlab-integration-test",
        experiment_name="super-test",
        description="Test fastai integration with swanlab",
        logdir="./logs",
    ),
)
2 changes: 2 additions & 0 deletions test/integration/fastai/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
fastai
swanlab
3 changes: 3 additions & 0 deletions test/integration/huggingface/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
transformers
datasets
swanlab
57 changes: 57 additions & 0 deletions test/integration/huggingface/transformers_bert_train.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
from tutils import open_dev_mode
import swanlab

# Log in to the dev-mode cloud backend before the heavy imports below.
swanlab.login(open_dev_mode())

import evaluate
import numpy as np
from swanlab.integration.huggingface import SwanLabCallback
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments


def tokenize_function(examples):
    """Tokenize the "text" field of a batch, padded/truncated to max length."""
    texts = examples["text"]
    return tokenizer(texts, padding="max_length", truncation=True)


def compute_metrics(eval_pred):
    """Compute accuracy from the (logits, labels) pair the Trainer passes in."""
    logits, labels = eval_pred[0], eval_pred[1]
    preds = np.argmax(logits, axis=-1)
    result = metric.compute(predictions=preds, references=labels)
    return result


# Full Yelp review dataset (5-star classification).
dataset = load_dataset("yelp_review_full")

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Keep the run fast: 1000 examples each for train and eval.
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))

metric = evaluate.load("accuracy")

model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)

training_args = TrainingArguments(
    output_dir="test_trainer",
    # Set report_to to "none" when only SwanLab should track the experiment.
    report_to="none",
    num_train_epochs=3,
    logging_steps=50,
)

# Instantiate the SwanLab callback that logs Trainer metrics.
swanlab_callback = SwanLabCallback(experiment_name="TransformersTest")

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics,
    # Pass the callback in via the callbacks parameter.
    callbacks=[swanlab_callback],
)

trainer.train()
68 changes: 68 additions & 0 deletions test/integration/lightning/lightning_base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
import torch
from lightning import LightningModule
from torch.utils.data import Dataset


class RandomDataset(Dataset):
    """Dataset of `num_samples` random Gaussian vectors, each of length `size`."""

    def __init__(self, size, num_samples):
        # Materialize every sample up front as a (num_samples, size) tensor.
        self.len = num_samples
        self.data = torch.randn(num_samples, size)

    def __getitem__(self, index):
        sample = self.data[index]
        return sample

    def __len__(self):
        return self.len


class BoringModel(LightningModule):
    """Minimal LightningModule: a single linear layer trained toward a constant target."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)
        # Per-epoch loss accumulators, cleared by the matching epoch-end hooks.
        self.training_step_outputs = []
        self.validation_step_outputs = []
        self.test_step_outputs = []

    def forward(self, x):
        return self.layer(x)

    def loss(self, batch, prediction):
        # An arbitrary loss (distance to an all-ones target) so `Trainer.fit`
        # actually updates the model weights.
        target = torch.ones_like(prediction)
        return torch.nn.functional.mse_loss(prediction, target)

    def configure_optimizers(self):
        sgd = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        scheduler = torch.optim.lr_scheduler.StepLR(sgd, step_size=1)
        return [sgd], [scheduler]

    def training_step(self, batch, _):
        prediction = self.layer(batch)
        step_loss = self.loss(batch, prediction)
        self.log("loss", step_loss)
        self.training_step_outputs.append(step_loss)
        return step_loss

    def on_train_epoch_end(self):
        _ = torch.stack(self.training_step_outputs).mean()
        self.training_step_outputs.clear()  # free memory

    def validation_step(self, batch, _):
        prediction = self.layer(batch)
        step_loss = self.loss(batch, prediction)
        self.validation_step_outputs.append(step_loss)
        return step_loss

    def on_validation_epoch_end(self) -> None:
        _ = torch.stack(self.validation_step_outputs).mean()
        self.validation_step_outputs.clear()  # free memory

    def test_step(self, batch, _):
        prediction = self.layer(batch)
        step_loss = self.loss(batch, prediction)
        self.log("fake_test_acc", step_loss)
        self.test_step_outputs.append(step_loss)
        return step_loss

    def on_test_epoch_end(self) -> None:
        _ = torch.stack(self.test_step_outputs).mean()
        self.test_step_outputs.clear()  # free memory
43 changes: 43 additions & 0 deletions test/integration/lightning/lightning_train.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
from tutils import open_dev_mode
import swanlab

# Log in to the dev-mode cloud backend before importing the integrations.
swanlab.login(open_dev_mode())

import os
from lightning import Trainer
from swanlab.integration.pytorch_lightning import SwanLabLogger
from lightning_base import BoringModel, RandomDataset
from torch.utils.data import DataLoader


def main():
    """Train and test the BoringModel on random data, logging to SwanLab."""
    print("User process PID:", os.getpid())

    # Build three dataloaders over independent random datasets.
    num_samples = 100000
    train_loader = DataLoader(RandomDataset(32, num_samples), batch_size=32)
    val_loader = DataLoader(RandomDataset(32, num_samples), batch_size=32)
    test_loader = DataLoader(RandomDataset(32, num_samples), batch_size=32)

    model = BoringModel()

    # The SwanLab logger plugs into Lightning's standard logger interface.
    logger = SwanLabLogger(
        project="LightningTest",
        config={"num_samples": num_samples},
    )

    trainer = Trainer(
        max_epochs=2,
        logger=logger,
    )

    # Fit, then evaluate on the held-out loader.
    trainer.fit(model, train_loader, val_loader)
    trainer.test(dataloaders=test_loader)


if __name__ == "__main__":
    main()
2 changes: 2 additions & 0 deletions test/integration/lightning/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
lightning
swanlab
2 changes: 2 additions & 0 deletions test/integration/sb3/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
stable_baselines3
swanlab
41 changes: 41 additions & 0 deletions test/integration/sb3/sb3_PPO.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
from tutils import open_dev_mode
import swanlab

# Log in to the dev-mode cloud backend before the SB3 imports below.
swanlab.login(open_dev_mode())

import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv
from swanlab.integration.sb3 import SwanLabCallback

# Experiment hyperparameters, reused below when building the env and model.
config = {
    "policy_type": "MlpPolicy",
    "total_timesteps": 25000,
    "env_name": "CartPole-v1",
}


def make_env():
    """Create a monitored CartPole environment that can render RGB frames."""
    base_env = gym.make(config["env_name"], render_mode="rgb_array")
    return Monitor(base_env)


# Wrap the env factory in a single-process vectorized env, as PPO expects.
env = DummyVecEnv([make_env])
model = PPO(
    config["policy_type"],
    env,
    verbose=1,
)

# Train PPO; SwanLabCallback streams SB3 metrics to the SwanLab experiment.
model.learn(
    total_timesteps=config["total_timesteps"],
    callback=SwanLabCallback(
        project="PPO",
        experiment_name="MlpPolicy",
        verbose=2,
    ),
)

# Close the run so buffered logs are flushed.
swanlab.finish()
2 changes: 2 additions & 0 deletions test/integration/ultralytics/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
ultralytics
swanlab
17 changes: 17 additions & 0 deletions test/integration/ultralytics/ultralytics_classifer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from tutils import open_dev_mode
import swanlab

swanlab.login(open_dev_mode())

from ultralytics import YOLO
from swanlab.integration.ultralytics import add_swanlab_callback


def main():
    """Train a YOLOv8-nano classifier on mnist160 with SwanLab logging."""
    yolo = YOLO("yolov8n-cls.pt")
    add_swanlab_callback(yolo)
    yolo.train(data="mnist160", epochs=1, imgsz=64)


if __name__ == "__main__":
    main()
17 changes: 17 additions & 0 deletions test/integration/ultralytics/ultralytics_detection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from tutils import open_dev_mode
import swanlab

swanlab.login(open_dev_mode())

from ultralytics import YOLO
from swanlab.integration.ultralytics import add_swanlab_callback


def main():
    """Train a YOLOv8-nano detector on COCO128 with SwanLab logging."""
    yolo = YOLO("yolov8n.pt")
    add_swanlab_callback(yolo)
    yolo.train(data="coco128.yaml", epochs=2, imgsz=640, batch=64)


if __name__ == "__main__":
    main()
17 changes: 17 additions & 0 deletions test/integration/ultralytics/ultralytics_pose.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from tutils import open_dev_mode
import swanlab

swanlab.login(open_dev_mode())

from ultralytics import YOLO
from swanlab.integration.ultralytics import add_swanlab_callback


def main():
    """Train a YOLOv8-nano pose model on coco8-pose with SwanLab logging."""
    yolo = YOLO("yolov8n-pose.pt")
    add_swanlab_callback(yolo)
    yolo.train(data="coco8-pose.yaml", epochs=2, imgsz=640, batch=64)


if __name__ == "__main__":
    main()
17 changes: 17 additions & 0 deletions test/integration/ultralytics/ultralytics_segmentation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from tutils import open_dev_mode
import swanlab

swanlab.login(open_dev_mode())

from ultralytics import YOLO
from swanlab.integration.ultralytics import add_swanlab_callback


def main():
    """Train a YOLOv8-nano segmentation model on COCO128-seg with SwanLab logging."""
    yolo = YOLO("yolov8n-seg.pt")
    add_swanlab_callback(yolo)
    yolo.train(data="coco128-seg.yaml", epochs=2, imgsz=640, batch=64)


if __name__ == "__main__":
    main()