做营销看的网站河南建设工程一体化
2026/1/17 12:40:14 网站建设 项目流程
做营销看的网站,河南建设工程一体化,仿门户网站多功能js相册画廊源码,wordpress密文全球分布式大规模滑坡数据集数据集整合了来自 鲁定#xff08;Luding#xff09;、尼普斯#xff08;Nippes#xff09;、北海道#xff08;Hokkaido#xff09;、九寨沟#xff08;Jiuzhaigou#xff09;和米林#xff08;Mainling#xff09; 五个地震滑坡高发区域…全球分布式大规模滑坡数据集数据集整合了来自 鲁定Luding、尼普斯Nippes、北海道Hokkaido、九寨沟Jiuzhaigou和米林Mainling 五个地震滑坡高发区域的多源遥感影像涵盖了 无人机UAV航拍影像、PlanetScope、Gaofen-6高分六号卫星影像 以及 天地图Map World 数据。数据通过多源高分辨率遥感图像实现全球分布式滑坡样本的构建旨在支持地震诱发滑坡检测、分割及灾后评估等任务。整个数据集采用 TIFF 格式 存储包含 训练集train 与 验证集validation 两部分按照约 3:10.75:0.25比例 随机划分且每幅影像均不含重叠区域。数据集大小15621张共26.7G图像大小1024×1024图像分辨率0.5m-3m传感器UAV, Map WorldPlanetScope, Gaofen-6波段RGB图片格式:tiff滑坡地区Luding, Nippes, Hokkaido,Jiuzhaigou and Mainling全球分布式大规模滑坡数据集15,621 张 1024×1024 RGB GeoTIFF 图像含滑坡掩膜标签的完整训练代码基于语义分割任务使用PyTorch torchvision Albumentations segmentation_models_pytorch支持多源遥感影像输入适用于滑坡检测与灾后评估。✅ 一、任务说明任务类型二类语义分割滑坡 vs 非滑坡输入1024×1024 RGB GeoTIFF*.tif标签对应二值掩膜0背景1滑坡同样为*.tif模型建议U-Net、DeepLabV3、FPN推荐使用预训练编码器✅ 二、目录结构要求LandslideDataset/ ├── train/ │ ├── images/ # *.tif │ └── masks/ # *.tif (same name as image) └── val/ ├── images/ └── masks/示例train/images/Luding_001.tiftrain/masks/Luding_001.tif✅ 三、环境安装pipinstalltorch torchvision torchaudio pipinstallalbumentations opencv-python pillow pipinstallsegmentation-models-pytorch geopandas rasterio scikit-image pipinstalltqdm tensorboard matplotlibsegmentation-models-pytorch提供 SOTA 分割架构rasterio高效读取 GeoTIFFalbumentations遥感图像增强✅ 四、完整训练代码train_landslide.py# train_landslide.pyimportosimporttorchimporttorch.nnasnnfromtorch.utils.dataimportDataLoaderfromtorch.optimimportAdamWfromtorch.optim.lr_schedulerimportCosineAnnealingLRimportalbumentationsasAfromalbumentations.pytorchimportToTensorV2importsegmentation_models_pytorchassmpfromtqdmimporttqdmimportnumpyasnpimportrasteriofrompathlibimportPathimportargparsefromdatetimeimportdatetime# ----------------------------# Dataset Class# 
# ----------------------------
# Dataset Class
# ----------------------------
class LandslideDataset(torch.utils.data.Dataset):
    """Paired image/mask dataset of 1024x1024 RGB GeoTIFF tiles for binary
    landslide segmentation (0 = background, 1 = landslide).

    Images and masks are matched by sorted filename order, so each mask file
    must share its image's name (e.g. train/images/Luding_001.tif and
    train/masks/Luding_001.tif).
    """

    def __init__(self, image_dir, mask_dir, transform=None):
        """
        Args:
            image_dir: directory containing *.tif RGB images.
            mask_dir:  directory containing *.tif single-band masks.
            transform: optional albumentations Compose applied to both
                       image and mask (expected to end with ToTensorV2).
        """
        self.image_paths = sorted(Path(image_dir).glob("*.tif"))
        self.mask_paths = sorted(Path(mask_dir).glob("*.tif"))
        assert len(self.image_paths) == len(self.mask_paths), "Image and mask counts mismatch!"
        self.transform = transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img_path = self.image_paths[idx]
        mask_path = self.mask_paths[idx]
        # Read the GeoTIFF; keep only the first three bands (RGB).
        with rasterio.open(img_path) as src:
            image = src.read([1, 2, 3])  # (3, H, W)
        image = np.transpose(image, (1, 2, 0))  # CHW -> HWC for albumentations
        with rasterio.open(mask_path) as src:
            mask = src.read(1)  # single band
        # Normalize mask to {0, 1}: some products encode landslide as 255.
        if mask.max() > 1:
            mask = (mask > 0).astype(np.uint8)
        # Augmentation (applied jointly to image and mask).
        if self.transform:
            augmented = self.transform(image=image, mask=mask)
            image = augmented["image"]
            mask = augmented["mask"]
        # NOTE(review): .long() assumes the transform converted mask to a
        # torch tensor (ToTensorV2); a raw numpy mask has no .long().
        return image, mask.long()  # mask as LongTensor for CrossEntropyLoss


# ----------------------------
# Training function
# ----------------------------
def train_one_epoch(model, dataloader, optimizer, criterion, device, epoch):
    """Run one training epoch; returns the mean batch loss."""
    model.train()
    total_loss = 0.0
    pbar = tqdm(dataloader, desc=f"Epoch {epoch}")
    for images, masks in pbar:
        images = images.to(device)
        masks = masks.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, masks)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        pbar.set_postfix(loss=loss.item())
    return total_loss / len(dataloader)


# ----------------------------
# Validation function
# ----------------------------
def validate(model, dataloader, criterion, device):
    """Evaluate on the validation set; returns (avg_loss, mean IoU of the
    landslide class)."""
    model.eval()
    total_loss = 0.0
    total_iou = 0.0
    count = 0
    with torch.no_grad():
        for images, masks in tqdm(dataloader, desc="Validation"):
            images = images.to(device)
            masks = masks.to(device)
            outputs = model(images)
            loss = criterion(outputs, masks)
            total_loss += loss.item()
            # IoU of the landslide class (label 1), per image then averaged.
            preds = torch.argmax(outputs, dim=1)
            intersection = ((preds == 1) & (masks == 1)).sum((1, 2))
            union = ((preds == 1) | (masks == 1)).sum((1, 2))
            iou = (intersection + 1e-6) / (union + 1e-6)  # epsilon avoids 0/0
            total_iou += iou.mean().item()
            count += 1
    avg_loss = total_loss / len(dataloader)
    avg_iou = total_iou / count
    print(f"✅ Val Loss: {avg_loss:.4f}, mIoU: {avg_iou:.4f}")
    return avg_loss, avg_iou


# ----------------------------
# Main
# ----------------------------
def main(args):
    """Train a U-Net landslide-segmentation model on the dataset under
    args.data_root and save the best checkpoint (by validation mIoU)."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Augmentation tuned for remote-sensing imagery.
    train_transform = A.Compose([
        A.RandomRotate90(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.OneOf([
            A.MotionBlur(p=0.2),
            A.MedianBlur(blur_limit=3, p=0.1),
            A.Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        A.CLAHE(p=0.2),  # contrast enhancement, well suited to remote sensing
        A.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),  # ImageNet statistics
        ToTensorV2(),
    ])
    val_transform = A.Compose([
        A.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ])

    # Datasets / loaders (expects train|val x images|masks under data_root).
    train_dataset = LandslideDataset(
        image_dir=os.path.join(args.data_root, "train/images"),
        mask_dir=os.path.join(args.data_root, "train/masks"),
        transform=train_transform,
    )
    val_dataset = LandslideDataset(
        image_dir=os.path.join(args.data_root, "val/images"),
        mask_dir=os.path.join(args.data_root, "val/masks"),
        transform=val_transform,
    )
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size,
                            shuffle=False, num_workers=4)

    # Model: U-Net with an ImageNet-pretrained ResNet34 encoder.
    model = smp.Unet(
        encoder_name="resnet34",
        encoder_weights="imagenet",
        in_channels=3,
        classes=2,  # binary: background + landslide
    ).to(device)

    # Loss: Dice + CrossEntropy combination.
    dice_loss = smp.losses.DiceLoss(mode="multiclass")
    ce_loss = nn.CrossEntropyLoss()

    def criterion(y_pred, y_true):
        return dice_loss(y_pred, y_true) + ce_loss(y_pred, y_true)

    optimizer = AdamW(model.parameters(), lr=args.lr, weight_decay=1e-4)
    scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)

    best_iou = 0.0
    save_dir = f"runs/landslide_{datetime.now().strftime('%Y%m%d_%H%M')}"
    os.makedirs(save_dir, exist_ok=True)

    # Training loop.
    for epoch in range(1, args.epochs + 1):
        train_loss = train_one_epoch(model, train_loader, optimizer,
                                     criterion, device, epoch)
        val_loss, val_iou = validate(model, val_loader, criterion, device)
        scheduler.step()
        # Keep only the checkpoint with the best validation mIoU.
        if val_iou > best_iou:
            best_iou = val_iou
            torch.save(model.state_dict(),
                       os.path.join(save_dir, "best_model.pth"))
            print(f"New best mIoU: {best_iou:.4f}, saved!")
        # Append a one-line epoch summary to the run log.
        with open(os.path.join(save_dir, "log.txt"), "a") as f:
            f.write(f"Epoch {epoch}: Train Loss={train_loss:.4f}, "
                    f"Val Loss={val_loss:.4f}, mIoU={val_iou:.4f}\n")

    print(f"✅ Training finished! Best mIoU: {best_iou:.4f}")
    print(f"Model saved to: {save_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_root", type=str, default="./LandslideDataset",
                        help="Dataset root directory")
    parser.add_argument("--batch_size", type=int, default=8,
                        help="Batch size (adjust based on GPU memory)")
    parser.add_argument("--epochs", type=int, default=50,
                        help="Number of training epochs")
    parser.add_argument("--lr", type=float, default=1e-3,
                        help="Learning rate")
    args = parser.parse_args()
    main(args)

# --- Original article notes (preserved from the source page) ---
# 五、Example run command:
#   python train_landslide.py \
#       --data_root ./LandslideDataset \
#       --batch_size 8 \
#       --epochs 50 \
#       --lr 0.001
# VRAM tip: 1024x1024 tiles are large. With < 16 GB of GPU memory, use
# batch_size 4 or 2, or add A.Resize(512, 512) to train_transform to train
# at lower resolution (upsample predictions again at inference time).
# 六、Model choice suggestions:
#   U-Net (ResNet34)       - balanced accuracy/speed  -> recommended default
#   DeepLabV3 (ResNet50)   - finer boundaries         -> high-accuracy needs
#   FPN (EfficientNet-b3)  - strong multi-scale fusion -> small landslides
# To switch, replace the model in main() with, e.g.:
#   model = smp.DeepLabV3Plus(encoder_name="resnet50",
#                             encoder_weights="imagenet",
#                             classes=2).to(device)

需要专业的网站建设服务?

联系我们获取免费的网站建设咨询和方案报价,让我们帮助您实现业务目标

立即咨询