This folder contains the image data used to generate the training data for label2label training, as well as the trained models shown in our paper:

---
Lisa Sophie Kölln, Omar Salem, Jessica Valli, Carsten Gram Hansen, Gail McConnell; 'Label2label: Training a neural network to selectively restore cellular structures in fluorescence microscopy'. J Cell Sci 2022; jcs.258994. doi: https://doi.org/10.1242/jcs.258994
---

The example notebooks 'example.ipynb' and 'prediction.ipynb' outline how the CSBDeep framework was used for this work. For more information about the CSBDeep framework, please check out the original publication and GitHub page:

---
Weigert et al.; 'Content-aware image restoration: pushing the limits of fluorescence microscopy'. Nature Methods. doi: https://doi.org/10.1038/s41592-018-0216-7
---
https://github.com/CSBDeep/CSBDeep
---

The following changes to the CSBDeep framework (version 0.6.3) enable training with a structural similarity (SSIM) or multi-scale structural similarity (MS-SSIM) loss function for image data with a data range of 0 to 1:

>>> I.) add in 'csbdeep/internals/losses.py':

    import tensorflow as tf

    # K, _mean_or_not and backend_channels_last are already defined/imported in losses.py

    def loss_ssim(mean=True):
        R = _mean_or_not(mean)
        if backend_channels_last():
            def ssim(y_true, y_pred):
                n = K.shape(y_true)[-1]
                return 1 - R(
                    tf.image.ssim(y_true, y_pred[..., :n], 1.0,
                                  filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
                )
            return ssim
        else:
            def ssim(y_true, y_pred):
                n = K.shape(y_true)[1]
                return 1 - R(
                    tf.image.ssim(y_true, y_pred[:, :n, ...], 1.0,
                                  filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
                )
            return ssim

    def loss_ms_ssim(mean=True, no_weights=5, filter_size=11, filter_sigma=1.5, **kwargs):
        # power factors (per-scale weights) of the MS-SSIM loss
        if no_weights == 5:
            msssim_weights = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)
        elif no_weights == 4:
            msssim_weights = (0.11063, 0.33253, 0.33513, 0.2217)
        elif no_weights == 3:
            msssim_weights = (0.20964, 0.46591, 0.32446)
        elif no_weights == 2:
            msssim_weights = (0.40678, 0.59323)
        elif no_weights == 1:
            msssim_weights = (1.0,)  # must be a tuple, not a bare number
        R = _mean_or_not(mean)
        if backend_channels_last():
            def mssim(y_true, y_pred):
                n = K.shape(y_true)[-1]
                return 1 - R(
                    tf.image.ssim_multiscale(y_true, y_pred[..., :n], 1.0,
                                             filter_sigma=filter_sigma,
                                             filter_size=filter_size,
                                             power_factors=msssim_weights)
                )
            return mssim
        else:
            def mssim(y_true, y_pred):
                n = K.shape(y_true)[1]
                return 1 - R(
                    tf.image.ssim_multiscale(y_true, y_pred[:, :n, ...], 1.0,
                                             filter_sigma=filter_sigma,
                                             filter_size=filter_size,
                                             power_factors=msssim_weights)
                )
            return mssim
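As a quick check that the new loss behaves as expected, it can be evaluated directly on dummy image batches in the 0-1 data range. This is only an illustrative sketch, not part of the original modifications; it assumes the modified 'losses.py' from step I is in place and a TensorFlow 2 backend with eager execution and channels-last image ordering:

    import numpy as np
    import tensorflow as tf
    from csbdeep.internals.losses import loss_ms_ssim  # available after step I

    # two random image batches in the 0-1 data range, shape (batch, y, x, channel)
    y_true = tf.constant(np.random.rand(2, 128, 128, 1), dtype=tf.float32)
    y_pred = tf.constant(np.random.rand(2, 128, 128, 1), dtype=tf.float32)

    # three scales, so every downsampled image (128, 64, 32 px) stays larger than the 11 px filter
    loss_fn = loss_ms_ssim(no_weights=3, filter_size=11)
    print(float(loss_fn(y_true, y_true)))  # close to 0 for identical images
    print(float(loss_fn(y_true, y_pred)))  # clearly larger for unrelated images

Note that the number of scales (no_weights) limits the minimum patch size: each additional scale halves the image, and the smallest scale must still be at least as large as filter_size.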
>>> II.1) add in line 166 in 'csbdeep/models/config.py':

    self.ms_ssim_no_weights  = None
    self.ms_ssim_filter_size = None

>>> II.2) add/replace in line 225ff in 'csbdeep/models/config.py':

    ok['train_loss'] = (
        (self.probabilistic and self.train_loss == 'laplace') or
        (not self.probabilistic and self.train_loss in ('mse', 'mae', 'ssim', 'ms_ssim'))
    )

>>> III.1) add/replace in line 5 in 'csbdeep/internals/train.py':

    from ..internals.losses import loss_laplace, loss_mse, loss_mae, loss_thresh_weighted_decay, loss_ssim, loss_ms_ssim

>>> III.2) add/replace in line 35 in 'csbdeep/internals/train.py':

    def prepare_model(model, optimizer, loss, ms_ssim_no_weights, ms_ssim_filter_size,
                      metrics=('mse', 'mae'), loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):

>>> III.3) add/replace in line 41 in 'csbdeep/internals/train.py':

    loss_standard = loss if loss == 'ms_ssim' else eval('loss_%s()' % loss)

>>> III.4) add/replace in line 70ff in 'csbdeep/internals/train.py':

    if _loss == 'ms_ssim':
        model.compile(optimizer=optimizer,
                      loss=loss_ms_ssim(no_weights=ms_ssim_no_weights,
                                        filter_size=ms_ssim_filter_size),
                      metrics=_metrics)
    else:
        model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)

>>> IV) add/replace in line 110 in 'csbdeep/models/care_standard.py':

    self.callbacks = train.prepare_model(self.keras_model, optimizer, self.config.train_loss,
                                         self.config.ms_ssim_no_weights, self.config.ms_ssim_filter_size,
                                         **kwargs)
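With these modifications in place, the MS-SSIM loss can be selected through the standard CARE configuration. The snippet below is only a minimal sketch of such a training call; the patch arrays X, Y, X_val, Y_val and all parameter values are placeholders, see 'example.ipynb' for the settings that were actually used in this work:

    from csbdeep.models import Config, CARE

    # 'ms_ssim' is accepted as train_loss after the change in step II.2
    config = Config('SYXC', n_channel_in=1, n_channel_out=1,
                    train_loss='ms_ssim', train_epochs=100, train_batch_size=16)

    # the two attributes added in step II.1; they are passed through to loss_ms_ssim (steps III/IV)
    config.ms_ssim_no_weights  = 3   # number of scales / power factors
    config.ms_ssim_filter_size = 11  # Gaussian filter size used at each scale

    model = CARE(config, 'my_model', basedir='models')
    history = model.train(X, Y, validation_data=(X_val, Y_val))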