@article{chen_segmentation_2025,
  title    = {A segmentation method for virtual clothing effect images},
  author   = {Chen, Ruihong and Yu, Kaijie and Chen, Yu and Xu, Zengbo},
  journal  = {Industria Textila},
  volume   = {76},
  number   = {2},
  pages    = {230--236},
  month    = apr,
  year     = {2025},
  issn     = {1222-5347},
  doi      = {10.35530/IT.076.02.2024111},
  url      = {https://www.revistaindustriatextila.ro/images/2025/2/008%20RUIHONG%20CHEN%20INDUSTRIA%20TEXTILA%20no.2_2025.pdf},
  urldate  = {2025-04-26},
  abstract = {Based on Efficient Channel Attention (ECA) mechanism, multi-level feature fusion and multi-scale channel attention mechanism, this paper proposes an improved VGG16-UNet clothing effect image segmentation method, commanded as EF-UNet, which aims to address the problems of insufficient semantic labels, poor local segmentation accuracy, and rough segmentation edges in clothing effect images. For this purpose, an ECA mechanism is first added to the fifth layer of the VGG16-UNet to assign greater weight and better extract target feature information to enhance the segmentation ability for clothing data. Subsequently, a multi-level feature fusion approach is adopted in a decoder to extract these features of various scales more efficiently and improve segmentation results. Finally, a multi-scale channel attention module is added to the skip connection to extract spatial information from multi-scale feature maps and to enable cross-dimensional interaction of salient visual features. Experimental findings show that the improved segmentation model has higher training indicators and better segmentation outcomes than similar networks such as FCN, U-Net, SegNet, PSPNet, DeepLabv3+, and VGG16-UNet semantic segmentation models. Compared with the original VGG16-UNet, the improved network has recorded an increase of 4.91\% in the Mean Intersection over Union (MIoU), an increase of 4.98\% in Mean Pixel Accuracy (MPA), and an increase of 0.43\% in Accuracy, respectively.},
}