@@ -291,202 +291,9 @@ if __name__ == "__main__":
</details>
- <details>
- <summary>FashionMNIST</summary>
-
- ```python
- import os
- import torch
- from torch import nn, optim, utils
- import torch.nn.functional as F
- from torchvision.datasets import FashionMNIST
- from torchvision.transforms import ToTensor
- import swanlab
-
-
- # Build the ResNet network
- class Basicblock(nn.Module):
-     def __init__(self, in_planes, planes, stride=1):
-         super(Basicblock, self).__init__()
-         self.conv1 = nn.Sequential(
-             nn.Conv2d(in_channels=in_planes, out_channels=planes, kernel_size=3, stride=stride, padding=1, bias=False),
-             nn.BatchNorm2d(planes),
-             nn.ReLU()
-         )
-         self.conv2 = nn.Sequential(
-             nn.Conv2d(in_channels=planes, out_channels=planes, kernel_size=3, stride=1, padding=1, bias=False),
-             nn.BatchNorm2d(planes),
-         )
-
-         if stride != 1 or in_planes != planes:
-             self.shortcut = nn.Sequential(
-                 nn.Conv2d(in_channels=in_planes, out_channels=planes, kernel_size=3, stride=stride, padding=1),
-                 nn.BatchNorm2d(planes)
-             )
-         else:
-             self.shortcut = nn.Sequential()
-
-     def forward(self, x):
-         out = self.conv1(x)
-         out = self.conv2(out)
-         out += self.shortcut(x)
-         out = F.relu(out)
-         return out
-
-
- class ResNet(nn.Module):
-     def __init__(self, block, num_block, num_classes):
-         super(ResNet, self).__init__()
-         self.in_planes = 16
-         self.conv1 = nn.Sequential(
-             nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1),
-             nn.BatchNorm2d(16),
-             nn.ReLU()
-         )
-         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
-
-         self.block1 = self._make_layer(block, 16, num_block[0], stride=1)
-         self.block2 = self._make_layer(block, 32, num_block[1], stride=2)
-         self.block3 = self._make_layer(block, 64, num_block[2], stride=2)
-         # self.block4 = self._make_layer(block, 512, num_block[3], stride=2)
-
-         self.outlayer = nn.Linear(64, num_classes)
-
-     def _make_layer(self, block, planes, num_block, stride):
-         layers = []
-         for i in range(num_block):
-             if i == 0:
-                 layers.append(block(self.in_planes, planes, stride))
-             else:
-                 layers.append(block(planes, planes, 1))
-         self.in_planes = planes
-         return nn.Sequential(*layers)
-
-     def forward(self, x):
-         x = self.maxpool(self.conv1(x))
-         x = self.block1(x)  # [B, 16, 28, 28]
-         x = self.block2(x)  # [B, 32, 14, 14]
-         x = self.block3(x)  # [B, 64, 7, 7]
-         # out = self.block4(out)
-         x = F.avg_pool2d(x, 7)  # [B, 64, 1, 1]
-         x = x.view(x.size(0), -1)  # [B, 64]
-         out = self.outlayer(x)
-         return out
-
-
- # Capture and visualize the first few images for preview
- def log_images(loader, num_images=16):
-     images_logged = 0
-     logged_images = []
-     for images, labels in loader:
-         # images: batch of images, labels: batch of labels
-         for i in range(images.shape[0]):
-             if images_logged < num_images:
-                 # Use swanlab.Image to convert the image into a format SwanLab can visualize
-                 logged_images.append(swanlab.Image(images[i], caption=f"Label: {labels[i]}", size=(128, 128)))
-                 images_logged += 1
-             else:
-                 break
-         if images_logged >= num_images:
-             break
-     swanlab.log({"Preview/MNIST": logged_images})
-
-
- if __name__ == "__main__":
-     # Select the device
-     try:
-         use_mps = torch.backends.mps.is_available()
-     except AttributeError:
-         use_mps = False
-
-     if torch.cuda.is_available():
-         device = "cuda"
-     elif use_mps:
-         device = "mps"
-     else:
-         device = "cpu"
-
-     # Initialize swanlab
-     run = swanlab.init(
-         project="FashionMNIST",
-         workspace="SwanLab",
-         experiment_name="Resnet18-Adam",
-         config={
-             "model": "Resnet34",
-             "optim": "Adam",
-             "lr": 0.001,
-             "batch_size": 32,
-             "num_epochs": 10,
-             "train_dataset_num": 55000,
-             "val_dataset_num": 5000,
-             "device": device,
-             "num_classes": 10,
-         },
-     )
-
-     # Set up the training and validation datasets
-     dataset = FashionMNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
-     train_dataset, val_dataset = utils.data.random_split(
-         dataset, [run.config.train_dataset_num, run.config.val_dataset_num]
-     )
-
-     train_loader = utils.data.DataLoader(train_dataset, batch_size=run.config.batch_size, shuffle=True)
-     val_loader = utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False)
-
-     # Initialize the model, loss function, and optimizer
-     if run.config.model == "Resnet18":
-         model = ResNet(Basicblock, [1, 1, 1, 1], 10)
-     elif run.config.model == "Resnet34":
-         model = ResNet(Basicblock, [2, 3, 5, 2], 10)
-     elif run.config.model == "Resnet50":
-         model = ResNet(Basicblock, [3, 4, 6, 3], 10)
+ [BERT-IMDB](https://docs.swanlab.cn/zh/examples/bert.html)
-     model.to(torch.device(device))
-
-     criterion = nn.CrossEntropyLoss()
-     optimizer = optim.Adam(model.parameters(), lr=run.config.lr)
-
-     # (Optional) Preview the first 16 images of the dataset
-     log_images(train_loader, 16)
-
-     # Start training
-     for epoch in range(1, run.config.num_epochs + 1):
-         swanlab.log({"train/epoch": epoch}, step=epoch)
-         # Training loop
-         for iter, batch in enumerate(train_loader):
-             x, y = batch
-             x, y = x.to(device), y.to(device)
-             optimizer.zero_grad()
-             output = model(x)
-             loss = criterion(output, y)
-             loss.backward()
-             optimizer.step()
-
-             if iter % 40 == 0:
-                 print(
-                     f"Epoch [{epoch}/{run.config.num_epochs}], Iteration [{iter + 1}/{len(train_loader)}], Loss: {loss.item()}"
-                 )
-                 swanlab.log({"train/loss": loss.item()}, step=(epoch - 1) * len(train_loader) + iter)
-
-         # Validate every 2 epochs
-         if epoch % 2 == 0:
-             model.eval()
-             correct = 0
-             total = 0
-             with torch.no_grad():
-                 for batch in val_loader:
-                     x, y = batch
-                     x, y = x.to(device), y.to(device)
-                     output = model(x)
-                     _, predicted = torch.max(output, 1)
-                     total += y.size(0)
-                     correct += (predicted == y).sum().item()
-
-             accuracy = correct / total
-             swanlab.log({"val/accuracy": accuracy}, step=epoch)
- ```
-
- </details>
+ [YOLO](https://docs.swanlab.cn/zh/examples/yolo.html)
<br>