__copyright__="""Machine Learning for Distributed Acoustic Sensing data (MLDAS)Copyright (c) 2020, The Regents of the University of California,through Lawrence Berkeley National Laboratory (subject to receipt ofany required approvals from the U.S. Dept. of Energy). All rights reserved.If you have questions about your rights to use or distribute this software,please contact Berkeley Lab's Intellectual Property Office atIPO@lbl.gov.NOTICE. This Software was developed under funding from the U.S. Departmentof Energy and the U.S. Government consequently retains certain rights. Assuch, the U.S. Government has been granted for itself and others acting onits behalf a paid-up, nonexclusive, irrevocable, worldwide license in theSoftware to reproduce, distribute copies to the public, prepare derivative works, and perform publicly and display publicly, and to permit others to do so."""__license__="Modified BSD license (see LICENSE.txt)"__maintainer__="Vincent Dumont"__email__="vincentdumont11@gmail.com"importtorch,numpy,copy
def suplearn_simple(model, criterion, optimizer, train_loader, test_loader,
                    epochs=1, print_every=1, save_model=False, verbose=True):
    """
    Simple, non-optimized, supervised training with a validation step
    performed at regular intervals during batch iteration, for single
    node, single processor execution.

    Parameters
    ----------
    model : :py:class:`torch.nn.Module`
      Model to train
    criterion : e.g. :py:class:`torch.nn.CrossEntropyLoss`
      Loss function
    optimizer : :py:class:`torch.optim.Optimizer`
      Optimizer to perform gradient descent
    train_loader : :py:class:`torch.utils.data.DataLoader`
      Input dataset for training part
    test_loader : :py:class:`torch.utils.data.DataLoader`
      Input dataset for validation step
    epochs : :py:class:`int`
      Number of epochs to execute the training
    print_every : :py:class:`int`
      Batch interval at which both training/validation loss and
      accuracy are evaluated
    save_model : :py:class:`bool`
      Save a snapshot of the updated model in a dictionary at every
      evaluation interval
    verbose : :py:class:`bool`
      Print training/validation metrics at every evaluation interval

    Returns
    -------
    loss_hist : :py:class:`numpy.ndarray`
      History of loss values
    model : :py:class:`torch.nn.Module` or :py:class:`dict`
      Final trained model, or dictionary of model snapshots if
      ``save_model`` is True.
    """
    # Initialize snapshot dictionary and metric history array (8 values per row)
    models, loss_hist = {}, numpy.empty((0, 8))
    # Loop over epochs
    for epoch in range(epochs):
        train_num, train_acc, train_loss = 0, 0, 0.
        for batch_idx, (inputs, labels) in enumerate(train_loader):
            # Forward pass, loss computation and gradient update on current batch
            model.train()
            optimizer.zero_grad()
            output = model(inputs)
            loss = criterion(output, labels)
            train_loss += loss.item() * inputs.size(0)
            train_num += inputs.size(0)
            _, predicted = output.max(1)
            train_acc += predicted.eq(labels).sum().item()
            loss.backward()
            optimizer.step()
            # Run validation every `print_every` batches
            if (batch_idx + 1) % print_every == 0:
                if save_model:
                    models[len(loss_hist)] = copy.deepcopy(model)
                test_num, test_acc, test_loss = 0, 0, 0.
                model.eval()
                with torch.no_grad():
                    for inputs, labels in test_loader:
                        output = model(inputs)
                        loss = criterion(output, labels)
                        test_loss += loss.item() * inputs.size(0)
                        test_num += inputs.size(0)
                        _, predicted = output.max(1)
                        test_acc += predicted.eq(labels).sum().item()
                # Record metrics for this evaluation interval
                loss_hist = numpy.vstack((loss_hist,
                                          [epoch, batch_idx, train_num, train_loss,
                                           train_acc, test_num, test_loss, test_acc]))
                if verbose:
                    print(f"Epoch {epoch+1:>3}/{epochs} | "
                          f"Batch {batch_idx+1:>3}/{len(train_loader)} | "
                          f"Training loss: {train_loss/train_num:.5f} | "
                          f"Training accuracy: {100*train_acc/train_num:>7.3f} ({train_acc}/{train_num}) | "
                          f"Validation loss: {test_loss/test_num:.5f} | "
                          f"Validation accuracy: {100*test_acc/test_num:>7.3f} ({test_acc}/{test_num})")
                # Reset running training metrics for the next interval
                train_num, train_acc, train_loss = 0, 0, 0.
    model = models if save_model else model
    return loss_hist, model
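
# A minimal usage sketch (not part of the original module): it calls
# `suplearn_simple` on a hypothetical two-class problem built from random
# tensors. The network, data shapes, and hyperparameters below are
# illustrative assumptions, not values taken from MLDAS itself.
if __name__ == '__main__':
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset

    # Hypothetical dataset: 64 single-channel 8x8 images with binary labels
    data = torch.randn(64, 1, 8, 8)
    labels = torch.randint(0, 2, (64,))
    train_loader = DataLoader(TensorDataset(data[:48], labels[:48]), batch_size=8)
    test_loader = DataLoader(TensorDataset(data[48:], labels[48:]), batch_size=8)

    # Hypothetical model: flatten the image, then a single linear classifier
    net = nn.Sequential(nn.Flatten(), nn.Linear(64, 2))
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

    # With 6 training batches per epoch and print_every=3, validation runs
    # twice per epoch, so loss_hist gains two rows of 8 metrics per epoch.
    loss_hist, net = suplearn_simple(net, criterion, optimizer, train_loader,
                                     test_loader, epochs=2, print_every=3)
    print(loss_hist.shape)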