
Commit 631b9800 authored by 朱婉杰

Initial commit

parent cf90c01e
Pipeline #512 failed with stages


# ignore map, miou, datasets
map_out/
miou_out/
VOCdevkit/
datasets/
Medical_Datasets/
lfw/
logs/
model_data/
.temp_miou_out/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
import numpy as np
import matplotlib.pyplot as plt
import pywt
from scipy.fft import fft2, ifft2, fftshift, ifftshift
import os


def fft2c(img):
    """Centered 2D FFT."""
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(img)))


def ifft2c(kspace):
    """Centered 2D IFFT."""
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace)))


def soft_threshold(x, lam):
    """Soft-thresholding operator."""
    return np.sign(x) * np.maximum(np.abs(x) - lam, 0)


def total_variation(x, alpha):
    """Compute the total variation of an image."""
    dx = np.diff(x, axis=0)
    dy = np.diff(x, axis=1)
    return alpha * (np.sum(np.abs(dx)) + np.sum(np.abs(dy)))


def reconstruct_image(kspace_data):
    im = fftshift(ifft2(kspace_data, (256, 256)), axes=0)
    # Take the magnitude to obtain a real-valued image
    magnitude_image = np.abs(im)
    # Normalize the image to [0, 1]
    normalized_image = (magnitude_image - magnitude_image.min()) / (magnitude_image.max() - magnitude_image.min())
    return normalized_image


def gpm_mri_reconstruction(kspace_sampled, mask, lam, num_iters=2000, step_size=0.05, tv_alpha=0.05):
    """
    MRI reconstruction with the Gradient Projection Method (GPM) plus a total-variation constraint.
    :param kspace_sampled: sampled k-space data
    :param mask: sampling mask
    :param lam: regularization parameter
    :param num_iters: number of iterations
    :param step_size: step size
    :param tv_alpha: total-variation constraint strength
    :return: reconstructed image
    """
    # Initial reconstruction (zero filling)
    img_reconstructed = reconstruct_image(kspace_sampled)
    # Show the initial image
    plt.imshow(np.abs(img_reconstructed), cmap='gray')
    plt.title('Initial Reconstructed MRI Image')
    plt.show()
    plt.imsave('initial_reconstructed_image.png', np.abs(img_reconstructed), cmap='gray')
    for i in range(num_iters):
        # Gradient of the data-fidelity term
        kspace_current = fft2c(img_reconstructed)
        grad_data_fidelity = 2 * ifft2c((kspace_current - kspace_sampled) * mask)
        # Gradient of the sparsity term (via the wavelet transform)
        coeffs = pywt.wavedec2(img_reconstructed, 'db4', level=3)
        new_coeffs = []
        for c in coeffs:
            if isinstance(c, tuple):
                c_soft = tuple([soft_threshold(x, lam) for x in c])
                grad_sparse = [x - c_ for x, c_ in zip(c, c_soft)]
                new_coeffs.append(grad_sparse)
            else:
                c_soft = soft_threshold(c, lam)
                grad_sparse = c - c_soft
                new_coeffs.append(grad_sparse)
        grad_sparse_image = pywt.waverec2(new_coeffs, 'db4')
        # Gradient of the total-variation term
        grad_x, grad_y = np.gradient(img_reconstructed)
        grad_tv = np.sqrt(grad_x ** 2 + grad_y ** 2)
        grad_tv = tv_alpha * grad_tv
        # Total gradient
        grad_total = grad_data_fidelity + grad_sparse_image + grad_tv
        # Gradient-descent step
        img_temp = img_reconstructed - step_size * grad_total
        # Projection step: enforce the k-space sampling constraint
        kspace_temp = fft2c(img_temp)
        kspace_reconstructed = kspace_sampled * mask + kspace_temp * (1 - mask)
        img_reconstructed = reconstruct_image(kspace_reconstructed)
        if (i + 1) % 10 == 0 or i == 0:
            print(f"Iteration {i + 1}/{num_iters}")
    return img_reconstructed


def main():
    # Use the current working directory
    current_dir = os.getcwd()
    # Load the fully sampled k-space data
    kspace_path = os.path.join(current_dir, 'fid_20250418_163142.npy')
    if not os.path.exists(kspace_path):
        print(f"Error: file not found: {kspace_path}")
        return
    try:
        kspace_full = np.load(kspace_path)
        print(f"Loaded k-space data: {kspace_path}")
    except Exception as e:
        print(f"Error: failed to load k-space data: {e}")
        return
    # GPM parameters
    lam = 0.05         # regularization parameter; adjust as needed
    num_iters = 10000  # number of iterations
    step_size = 0.05   # step size; adjust as needed
    tv_alpha = 0.1     # total-variation constraint strength; adjust as needed
    # Iterate over all .npy files in the current directory, excluding the k-space file itself
    for filename in os.listdir(current_dir):
        if filename.lower().endswith('.npy') and filename != 'fid_20250418_163142.npy':
            mask_path = os.path.join(current_dir, filename)
            try:
                mask = np.load(mask_path)
                print(f"Processing file: {mask_path}")
            except Exception as e:
                print(f"Error: failed to load file {mask_path}: {e}")
                continue
            # Check that kspace_full and mask have matching shapes
            if kspace_full.shape != mask.shape:
                print(f"Warning: shape of {filename} does not match the k-space data, skipping.")
                continue
            # Sample k-space
            kspace_sampled = kspace_full * mask
            # Reconstruct the image
            try:
                img_reconstructed = gpm_mri_reconstruction(kspace_sampled, mask, lam, num_iters, step_size, tv_alpha)
                print(f"Reconstructed image: {filename}")
            except Exception as e:
                print(f"Error: failed to reconstruct {filename}: {e}")
                continue
            # Build the output PNG filename
            base_name = os.path.splitext(filename)[0]
            png_filename = f"{base_name}_gpm_reconstructed_fid_20250417_103329.png"
            png_path = os.path.join(current_dir, png_filename)
            plt.imshow(img_reconstructed, cmap='gray')  # display as grayscale
            plt.title('Reconstructed MRI Image')
            plt.show()
            plt.imsave(png_path, np.abs(img_reconstructed), cmap='gray')


if __name__ == "__main__":
    main()
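
The script above expects a specific k-space `.npy` file on disk. A minimal smoke test of `gpm_mri_reconstruction` with a synthetic phantom instead (a sketch that assumes the functions defined above are in scope; the phantom, sampling rate, and iteration count are arbitrary choices, not part of the original script):

```python
import numpy as np

rng = np.random.default_rng(0)
# Simple 256x256 phantom: a bright square on a dark background.
phantom = np.zeros((256, 256))
phantom[96:160, 96:160] = 1.0
kspace_full = fft2c(phantom)  # fft2c is defined in the script above
mask = (rng.random(phantom.shape) < 0.3).astype(float)  # ~30% random sampling
img = gpm_mri_reconstruction(kspace_full * mask, mask, lam=0.05, num_iters=50)
```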
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Input folder path
input_folder = r'D:\Users\Desktop\img'
# Output folder path
output_folder = r'D:\Users\Desktop\output_images'
# Create the output folder if it does not exist
if not os.path.exists(output_folder):
    os.makedirs(output_folder)

# Iterate over all files in the input folder
for filename in os.listdir(input_folder):
    if filename.endswith(('.jpg', '.png', '.jpeg')):
        # Build the full file path
        input_path = os.path.join(input_folder, filename)
        # Read the image as grayscale
        im2_uint8 = cv2.imread(input_path, cv2.IMREAD_GRAYSCALE)
        # Build a custom LUT
        lut = np.zeros((256, 1, 3), dtype=np.uint8)
        for i in range(256):
            if i < 32:
                # Low intensities mapped to pure red
                lut[i, 0, 2] = 255  # red channel
                lut[i, 0, 0] = 0    # blue channel
                lut[i, 0, 1] = 0    # green channel
            else:
                # Red-to-yellow gradient (red + green = yellow)
                lut[i, 0, 2] = 255  # red channel stays at maximum
                # Green ramps up in the upper half; clamp at 0 so negative
                # values cannot overflow the uint8 LUT
                lut[i, 0, 1] = max((i - 128) * 2, 0)
        # Convert to BGR and apply the LUT
        pseudo_color1 = cv2.LUT(cv2.cvtColor(im2_uint8, cv2.COLOR_GRAY2BGR), lut)
        # Build the output file path
        output_filename = os.path.splitext(filename)[0] + '_pseudo_color.jpg'
        output_path = os.path.join(output_folder, output_filename)
        # Save the processed image
        # cv2.imwrite(output_path, pseudo_color1)
        # Display the image with matplotlib (optional)
        # plt.imshow(pseudo_color1)
        # plt.title('Pseudo Color Image')
        # plt.axis('off')  # hide the axes
        # plt.show()
        # Extract the original filename (without extension)
        base_name = os.path.splitext(filename)[0]
        # Build the output file path
        output_filename = f'{base_name}_pseudo_color.png'
        output_path = os.path.join(output_folder, output_filename)
        # Display and save the image with matplotlib
        # (convert BGR to RGB so matplotlib shows the colors correctly)
        plt.imshow(cv2.cvtColor(pseudo_color1, cv2.COLOR_BGR2RGB))
        plt.gca().set_axis_off()
        plt.savefig(output_path, bbox_inches='tight', pad_inches=0)
        plt.close()  # close the current figure to avoid overlapping plots

# OpenCV built-in colormaps:
# 0  COLORMAP_AUTUMN
# 1  COLORMAP_BONE
# 2  COLORMAP_JET
# 3  COLORMAP_WINTER
# 4  COLORMAP_RAINBOW
# 5  COLORMAP_OCEAN
# 6  COLORMAP_SUMMER
# 7  COLORMAP_SPRING
# 8  COLORMAP_COOL
# 9  COLORMAP_HSV
# 10 COLORMAP_PINK
# 11 COLORMAP_HOT
# pseudo_color = cv2.applyColorMap(im2_uint8, cv2.COLORMAP_HSV)
# COLORMAP_JET: blue-cyan-yellow-red gradient (heatmap-like)
# COLORMAP_HOT: black-red-yellow-white gradient
# COLORMAP_HSV: cycles through the HSV color space
# Other options include COLORMAP_BONE, COLORMAP_COOL, COLORMAP_PINK, etc.
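
For comparison, the built-in colormaps listed in the comments above can replace the hand-rolled LUT entirely, as the commented-out `cv2.applyColorMap` call hints. A minimal sketch; the file paths are placeholders, not paths from this repository:

```python
import cv2

# Hypothetical input path; substitute a real grayscale image.
gray = cv2.imread('input_gray.png', cv2.IMREAD_GRAYSCALE)
# applyColorMap passes intensities through a built-in LUT (here JET).
pseudo = cv2.applyColorMap(gray, cv2.COLORMAP_JET)
cv2.imwrite('output_jet.png', pseudo)
```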
MIT License
Copyright (c) 2021 Bubbliiiing
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# U-Net Segmentation Model
## Unet: A PyTorch implementation of the segmentation model from U-Net: Convolutional Networks for Biomedical Image Segmentation
---
### Contents
1. [Top News](#top-news)
2. [Related Code](#related-code)
3. [Performance](#performance)
4. [Environment](#environment)
5. [Download](#download)
6. [How2train](#how2train)
7. [How2predict](#how2predict)
8. [miou Evaluation](#miou-evaluation)
9. [Reference](#reference)

## Top News
**`2022-03`**: **Major update: supports step and cos learning-rate schedules, a choice of adam or sgd optimizers, and learning rates that adapt to batch_size.**
The original repository from the BiliBili video is at: https://github.com/bubbliiiing/unet-pytorch/tree/bilibili

**`2020-08`**: **Repository created: supports multiple backbones, miou evaluation, annotation-data processing, extensive comments, and more.**

## Related Code
| Model | Path |
| :----- | :----- |
| Unet | https://github.com/bubbliiiing/unet-pytorch |
| PSPnet | https://github.com/bubbliiiing/pspnet-pytorch |
| deeplabv3+ | https://github.com/bubbliiiing/deeplabv3-plus-pytorch |

### Performance
**Unet is not well suited to VOC-style datasets; it works better on medical-style datasets with few features, where shallow features matter.**

| Training dataset | Weight file | Test dataset | Input size | mIOU |
| :-----: | :-----: | :------: | :------: | :------: |
| VOC12+SBD | [unet_vgg_voc.pth](https://github.com/bubbliiiing/unet-pytorch/releases/download/v1.0/unet_vgg_voc.pth) | VOC-Val12 | 512x512 | 58.78 |
| VOC12+SBD | [unet_resnet_voc.pth](https://github.com/bubbliiiing/unet-pytorch/releases/download/v1.0/unet_resnet_voc.pth) | VOC-Val12 | 512x512 | 67.53 |

### Environment
torch==1.2.0
torchvision==0.4.0

### Download
The weights needed for training can be downloaded from Baidu Netdisk.
Link: https://pan.baidu.com/s/1A22fC5cPRb74gqrpq7O9-A
Extraction code: 6n2c

The extended VOC dataset is also on Baidu Netdisk:
Link: https://pan.baidu.com/s/1vkk3lMheUm6IjTXznlg7Ng
Extraction code: 44mk

### How2train
#### 1. Training on the VOC dataset
1. Place the provided VOC dataset in VOCdevkit (there is no need to run voc_annotation.py).
2. Run train.py to start training; the default parameters already match the VOC dataset.
#### 2. Training on your own dataset
1. This project trains on data in VOC format.
2. Before training, place your label files in SegmentationClass inside the VOC2007 folder inside VOCdevkit.
3. Before training, place your image files in JPEGImages inside the VOC2007 folder inside VOCdevkit.
4. Before training, run voc_annotation.py to generate the corresponding txt files.
5. Remember to set num_classes in train.py to the number of classes + 1.
6. Run train.py to start training (a quick layout check follows this list).
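
Before launching training, a minimal sketch (assuming the default VOCdevkit layout described in the steps above, with val.txt produced by voc_annotation.py) to verify that the expected folders and split file exist:

```python
import os

# Expected VOC-format layout; adjust VOCdevkit_path if the dataset lives elsewhere.
VOCdevkit_path = 'VOCdevkit'
required = [
    os.path.join(VOCdevkit_path, 'VOC2007', 'JPEGImages'),
    os.path.join(VOCdevkit_path, 'VOC2007', 'SegmentationClass'),
    os.path.join(VOCdevkit_path, 'VOC2007', 'ImageSets', 'Segmentation', 'val.txt'),
]
for path in required:
    print(('ok      ' if os.path.exists(path) else 'MISSING ') + path)
```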
#### 3. Training on a medical dataset
1. Download the VGG pre-trained weights into model_data.
2. Run train_medical.py with the default parameters to start training.
### How2predict
#### 1. Using pre-trained weights
##### a. VOC pre-trained weights
1. After downloading and unzipping the repository, download the VOC-trained weights from Baidu Netdisk or the release page, place them in model_data, and run predict.py. When prompted, enter an image path such as:
```python
img/street.jpg
```
2. Settings inside predict.py enable fps testing and video detection.
##### b. Medical pre-trained weights
1. After downloading and unzipping the repository, download the weights trained on the medical dataset from Baidu Netdisk or the release page, place them in model_data, and modify model_path and num_classes in unet.py;
```python
_defaults = {
    #-------------------------------------------------------------------#
    #   model_path points to a weight file in the logs folder.
    #   After training, logs contains several weight files; pick one with a
    #   low validation loss. A low validation loss does not guarantee a high
    #   miou, only that those weights generalize well on the validation set.
    #-------------------------------------------------------------------#
    "model_path"    : 'model_data/unet_vgg_medical.pth',
    #--------------------------------#
    #   Number of classes to distinguish + 1
    #--------------------------------#
    "num_classes"   : 2,
    #--------------------------------#
    #   Backbone network: vgg or resnet50
    #--------------------------------#
    "backbone"      : "vgg",
    #--------------------------------#
    #   Input image size
    #--------------------------------#
    "input_shape"   : [512, 512],
    #--------------------------------#
    #   blend controls whether the prediction
    #   is blended with the original image
    #--------------------------------#
    "blend"         : True,
    #--------------------------------#
    #   Whether to use CUDA
    #   Set to False if there is no GPU
    #--------------------------------#
    "cuda"          : True,
}
```
2. Run predict.py and enter:
```python
img/cell.png
```
#### 2. Using your own trained weights
1. Follow the training steps.
2. In unet.py, modify model_path, backbone and num_classes in the block below to match your trained files; **model_path points to the weight file in the logs folder.**
```python
_defaults = {
    #-------------------------------------------------------------------#
    #   model_path points to a weight file in the logs folder.
    #   After training, logs contains several weight files; pick one with a
    #   low validation loss. A low validation loss does not guarantee a high
    #   miou, only that those weights generalize well on the validation set.
    #-------------------------------------------------------------------#
    "model_path"    : 'model_data/unet_vgg_voc.pth',
    #--------------------------------#
    #   Number of classes to distinguish + 1
    #--------------------------------#
    "num_classes"   : 21,
    #--------------------------------#
    #   Backbone network: vgg or resnet50
    #--------------------------------#
    "backbone"      : "vgg",
    #--------------------------------#
    #   Input image size
    #--------------------------------#
    "input_shape"   : [512, 512],
    #--------------------------------#
    #   blend controls whether the prediction
    #   is blended with the original image
    #--------------------------------#
    "blend"         : True,
    #--------------------------------#
    #   Whether to use CUDA
    #   Set to False if there is no GPU
    #--------------------------------#
    "cuda"          : True,
}
```
3. Run predict.py and enter:
```python
img/street.jpg
```
4. Settings inside predict.py enable fps testing and video detection.
### miou Evaluation
1. Set num_classes in get_miou.py to the number of predicted classes + 1.
2. Set name_classes in get_miou.py to the categories to distinguish (see the sketch after this list).
3. Run get_miou.py to obtain the miou.
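
As a concrete example, for a two-class cat/dog dataset (matching the commented-out example inside get_miou.py itself), the settings would be:

```python
num_classes  = 2 + 1                           # classes to distinguish + background
name_classes = ["_background_", "cat", "dog"]  # same names as in json_to_dataset
```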
## Reference
https://github.com/ggyyzm/pytorch_segmentation
https://github.com/bonlime/keras-deeplab-v3-plus
import numpy as np
import matplotlib.pyplot as plt
import pywt
from scipy.fft import fft2, ifft2, fftshift, ifftshift
import os


def fft2c(img):
    """Centered 2D FFT."""
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(img)))


def ifft2c(kspace):
    """Centered 2D IFFT."""
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace)))


def soft_threshold(x, lam):
    """Soft-thresholding operator."""
    return np.sign(x) * np.maximum(np.abs(x) - lam, 0)


def total_variation(x, alpha):
    """Compute the total variation of an image."""
    dx = np.diff(x, axis=0)
    dy = np.diff(x, axis=1)
    return alpha * (np.sum(np.abs(dx)) + np.sum(np.abs(dy)))


def reconstruct_image(kspace_data):
    im = fftshift(ifft2(kspace_data, (256, 256)), axes=0)
    # Take the magnitude to obtain a real-valued image
    magnitude_image = np.abs(im)
    # Normalize the image to [0, 1]
    normalized_image = (magnitude_image - magnitude_image.min()) / (magnitude_image.max() - magnitude_image.min())
    return normalized_image


def fista_mri_reconstruction(kspace_sampled, mask, lam, num_iters=2000, step_size=0.05, tv_alpha=0.05, wavelet='db4'):
    """
    MRI reconstruction with the Fast Iterative Shrinkage-Thresholding Algorithm (FISTA)
    plus a total-variation constraint.
    :param kspace_sampled: sampled k-space data
    :param mask: sampling mask
    :param lam: regularization parameter
    :param num_iters: number of iterations
    :param step_size: step size
    :param tv_alpha: total-variation constraint strength
    :param wavelet: wavelet basis name, 'db4' by default
    :return: reconstructed image
    """
    # Initial reconstruction (zero filling)
    img_reconstructed = reconstruct_image(kspace_sampled)
    # Show the initial image
    plt.imshow(np.abs(img_reconstructed), cmap='gray')
    plt.title('Initial Reconstructed MRI Image')
    plt.show()
    plt.imsave('initial_reconstructed_image.png', np.abs(img_reconstructed), cmap='gray')
    y = img_reconstructed.copy()
    t = 1
    for i in range(num_iters):
        # Transform the current image into the sparse domain (discrete wavelet transform, DWT)
        coeffs = pywt.wavedec2(y, wavelet, level=3)
        new_coeffs = []
        for c in coeffs:
            if isinstance(c, tuple):
                c = tuple([soft_threshold(x, lam * step_size) for x in c])
            else:
                c = soft_threshold(c, lam * step_size)
            new_coeffs.append(c)
        # Transform the sparse coefficients back to the image domain
        img_temp = pywt.waverec2(new_coeffs, wavelet)
        # Apply the total-variation constraint
        grad_tv = np.gradient(img_temp)
        grad_tv = np.sqrt(grad_tv[0] ** 2 + grad_tv[1] ** 2)
        img_temp = img_temp - step_size * tv_alpha * grad_tv
        # Data-consistency step
        kspace_reconstructed = fft2c(img_temp)
        kspace_reconstructed = kspace_sampled * mask + kspace_reconstructed * (1 - mask)
        img_new = reconstruct_image(kspace_reconstructed)
        # FISTA momentum update
        t_new = (1 + np.sqrt(1 + 4 * t ** 2)) / 2
        y = img_new + ((t - 1) / t_new) * (img_new - img_reconstructed)
        img_reconstructed = img_new
        t = t_new
        if (i + 1) % 10 == 0 or i == 0:
            print(f"Iteration {i + 1}/{num_iters}")
    return img_reconstructed


def main():
    # Use the current working directory
    current_dir = os.getcwd()
    # Load the fully sampled k-space data
    kspace_path = os.path.join(current_dir, 'fid_20250418_163142.npy')
    if not os.path.exists(kspace_path):
        print(f"Error: file not found: {kspace_path}")
        return
    try:
        kspace_full = np.load(kspace_path)
        print(f"Loaded k-space data: {kspace_path}")
    except Exception as e:
        print(f"Error: failed to load k-space data: {e}")
        return
    # FISTA parameters
    lam = 0.05         # regularization parameter; adjust as needed
    num_iters = 10000  # number of iterations
    step_size = 0.05   # step size; adjust as needed
    tv_alpha = 0.1     # total-variation constraint strength; adjust as needed
    # List of wavelet bases to try
    # wavelets = ['haar', 'sym4']
    wavelets = ['haar']
    # Iterate over all .npy files in the current directory, excluding the k-space file itself
    for filename in os.listdir(current_dir):
        if filename.lower().endswith('.npy') and filename != 'fid_20250418_163142.npy':
            mask_path = os.path.join(current_dir, filename)
            try:
                mask = np.load(mask_path)
                print(f"Processing file: {mask_path}")
            except Exception as e:
                print(f"Error: failed to load file {mask_path}: {e}")
                continue
            # Check that kspace_full and mask have matching shapes
            if kspace_full.shape != mask.shape:
                print(f"Warning: shape of {filename} does not match the k-space data, skipping.")
                continue
            # Sample k-space
            kspace_sampled = kspace_full * mask
            for wavelet in wavelets:
                # Reconstruct the image, forwarding the current wavelet choice
                try:
                    img_reconstructed = fista_mri_reconstruction(kspace_sampled, mask, lam, num_iters, step_size, tv_alpha, wavelet=wavelet)
                    print(f"Reconstructed {filename} with the {wavelet} wavelet")
                except Exception as e:
                    print(f"Error: failed to reconstruct {filename} with the {wavelet} wavelet: {e}")
                    continue
                # Build the output PNG filename, including the wavelet name
                base_name = os.path.splitext(filename)[0]
                png_filename = f"{base_name}_{wavelet}_fista_reconstructed_fid_20250417_103329.png"
                png_path = os.path.join(current_dir, png_filename)
                plt.imshow(img_reconstructed, cmap='gray')  # display as grayscale
                plt.title(f'Reconstructed MRI Image with {wavelet} wavelet')
                plt.show()
                plt.imsave(png_path, np.abs(img_reconstructed), cmap='gray')


if __name__ == "__main__":
    main()
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import dct, idct
import os


def fft2c(img):
    """Centered 2D FFT."""
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(img)))


def ifft2c(kspace):
    """Centered 2D IFFT."""
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace)))


def generate_sampling_mask(shape, sampling_rate):
    """Generate a random sampling mask."""
    num_samples = int(sampling_rate * shape[0] * shape[1])
    mask = np.zeros(shape, dtype=np.float32)
    indices = np.random.choice(shape[0] * shape[1], num_samples, replace=False)
    mask.flat[indices] = 1
    return mask


def dct2(a):
    """2D discrete cosine transform."""
    # scipy.fftpack's dct only accepts real input; the zero-filled
    # reconstruction is complex, so keep the real part
    a = np.real(a)
    return dct(dct(a.T, norm='ortho').T, norm='ortho')


def idct2(a):
    """2D inverse discrete cosine transform."""
    return idct(idct(a.T, norm='ortho').T, norm='ortho')


def soft_threshold(x, lam):
    """Soft-thresholding operator."""
    return np.sign(x) * np.maximum(np.abs(x) - lam, 0)


def admm_mri_reconstruction(kspace_sampled, mask, lam, rho=1.0, num_iters=100):
    """
    MRI reconstruction with the Alternating Direction Method of Multipliers (ADMM).
    :param kspace_sampled: sampled k-space data
    :param mask: sampling mask
    :param lam: regularization parameter
    :param rho: penalty parameter
    :param num_iters: number of iterations
    :return: reconstructed image
    """
    # Initial reconstruction (zero filling)
    img_reconstructed = ifft2c(kspace_sampled)
    z = dct2(img_reconstructed)
    u = np.zeros_like(z)
    for i in range(num_iters):
        # Update x
        kspace_x = kspace_sampled * mask + fft2c(ifft2c(idct2(z - u))) * (1 - mask)
        img_reconstructed = ifft2c(kspace_x)
        # Update z
        sparse_coeff = dct2(img_reconstructed) + u
        z = soft_threshold(sparse_coeff, lam / rho)
        # Update u
        u = u + (dct2(img_reconstructed) - z)
        if (i + 1) % 10 == 0 or i == 0:
            print(f"Iteration {i + 1}/{num_iters}")
    return np.abs(img_reconstructed)


def main():
    # Use the current working directory
    current_dir = os.getcwd()
    # Load the fully sampled k-space data
    kspace_path = os.path.join(current_dir, 'mrd_data1.npy')
    if not os.path.exists(kspace_path):
        print(f"Error: file not found: {kspace_path}")
        return
    try:
        kspace_full = np.load(kspace_path)
        print(f"Loaded k-space data: {kspace_path}")
    except Exception as e:
        print(f"Error: failed to load k-space data: {e}")
        return
    # ADMM parameters
    lam = 0.1        # regularization parameter; adjust as needed
    rho = 1.0        # penalty parameter
    num_iters = 100  # number of iterations
    # Iterate over all .npy files in the current directory, excluding the k-space file itself
    for filename in os.listdir(current_dir):
        if filename.lower().endswith('.npy') and filename != 'mrd_data1.npy':
            mask_path = os.path.join(current_dir, filename)
            try:
                mask = np.load(mask_path)
                print(f"Processing file: {mask_path}")
            except Exception as e:
                print(f"Error: failed to load file {mask_path}: {e}")
                continue
            # Check that kspace_full and mask have matching shapes
            if kspace_full.shape != mask.shape:
                print(f"Warning: shape of {filename} does not match the k-space data, skipping.")
                continue
            # Sample k-space
            kspace_sampled = kspace_full * mask
            # Reconstruct the image
            try:
                img_reconstructed = admm_mri_reconstruction(kspace_sampled, mask, lam, rho, num_iters)
                print(f"Reconstructed image: {filename}")
            except Exception as e:
                print(f"Error: ADMM reconstruction failed for {filename}: {e}")
                continue
            # Build the output PNG filename
            base_name = os.path.splitext(filename)[0]
            png_filename = f"{base_name}_ADMM_reconstructed.png"
            png_path = os.path.join(current_dir, png_filename)
            plt.imshow(img_reconstructed)
            plt.axis('off')  # hide the axes
            plt.tight_layout(pad=0)
            plt.savefig(png_path, dpi=300)  # save as PNG


if __name__ == "__main__":
    main()
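
The `generate_sampling_mask` helper in the ADMM script above is defined but never called. A minimal, self-contained sketch of how masks like the `.npy` files these reconstruction scripts load could be produced; the 256x256 shape and the sampling rates are assumptions:

```python
import numpy as np

# Mirrors the generate_sampling_mask helper defined in the script above.
shape = (256, 256)  # assumed k-space shape; match the loaded .npy data
for rate in (0.2, 0.3, 0.5):
    num_samples = int(rate * shape[0] * shape[1])
    mask = np.zeros(shape, dtype=np.float32)
    indices = np.random.choice(shape[0] * shape[1], num_samples, replace=False)
    mask.flat[indices] = 1
    np.save(f'mask_rate_{int(rate * 100)}.npy', mask)
```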
import numpy as np
import matplotlib.pyplot as plt
import pywt
from scipy.fft import fft2, ifft2, fftshift, ifftshift
import os


def fft2c(img):
    """Centered 2D FFT."""
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(img)))


def ifft2c(kspace):
    """Centered 2D IFFT."""
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace)))


def soft_threshold(x, lam):
    """Soft-thresholding operator."""
    return np.sign(x) * np.maximum(np.abs(x) - lam, 0)


def total_variation(x, alpha):
    """Compute the total variation of an image."""
    dx = np.diff(x, axis=0)
    dy = np.diff(x, axis=1)
    return alpha * (np.sum(np.abs(dx)) + np.sum(np.abs(dy)))


def reconstruct_image(kspace_data):
    im = fftshift(ifft2(kspace_data, (256, 256)), axes=0)
    # Take the magnitude to obtain a real-valued image
    magnitude_image = np.abs(im)
    # Normalize the image to [0, 1]
    normalized_image = (magnitude_image - magnitude_image.min()) / (magnitude_image.max() - magnitude_image.min())
    return normalized_image


def split_bregman_mri_reconstruction(kspace_sampled, mask, lam, num_iters=2000, step_size=0.05, tv_alpha=0.05):
    """
    MRI reconstruction with the Split Bregman algorithm plus a total-variation constraint.
    :param kspace_sampled: sampled k-space data
    :param mask: sampling mask
    :param lam: regularization parameter
    :param num_iters: number of iterations
    :param step_size: step size
    :param tv_alpha: total-variation constraint strength
    :return: reconstructed image
    """
    # Initial reconstruction (zero filling)
    img_reconstructed = reconstruct_image(kspace_sampled)
    # Show the initial image
    plt.imshow(np.abs(img_reconstructed), cmap='gray')
    plt.title('Initial Reconstructed MRI Image')
    plt.show()
    plt.imsave('initial_reconstructed_image.png', np.abs(img_reconstructed), cmap='gray')
    # Initialize the Bregman variables
    b1 = np.zeros_like(img_reconstructed)
    b2 = np.zeros_like(img_reconstructed)
    for i in range(num_iters):
        # Data-consistency step
        kspace_reconstructed = fft2c(img_reconstructed)
        kspace_reconstructed = kspace_sampled * mask + kspace_reconstructed * (1 - mask)
        img_temp = reconstruct_image(kspace_reconstructed)
        # Compute the total-variation term
        grad_tv = np.gradient(img_temp)
        grad_tv = np.sqrt(grad_tv[0] ** 2 + grad_tv[1] ** 2)
        # Update the image
        img_reconstructed = img_temp - step_size * (tv_alpha * grad_tv + b1 + b2)
        # Update the Bregman variables
        b1 = b1 + step_size * (grad_tv - soft_threshold(img_reconstructed + b1, lam))
        b2 = b2 + step_size * (grad_tv - soft_threshold(img_reconstructed + b2, lam))
        if (i + 1) % 10 == 0 or i == 0:
            print(f"Iteration {i + 1}/{num_iters}")
    return img_reconstructed


def main():
    # Use the current working directory
    current_dir = os.getcwd()
    # Load the fully sampled k-space data
    kspace_path = os.path.join(current_dir, 'fid_20250418_163142.npy')
    if not os.path.exists(kspace_path):
        print(f"Error: file not found: {kspace_path}")
        return
    try:
        kspace_full = np.load(kspace_path)
        print(f"Loaded k-space data: {kspace_path}")
    except Exception as e:
        print(f"Error: failed to load k-space data: {e}")
        return
    # Split Bregman parameters
    lam = 0.05         # regularization parameter; adjust as needed
    num_iters = 10000  # number of iterations
    step_size = 0.05   # step size; adjust as needed
    tv_alpha = 0.1     # total-variation constraint strength; adjust as needed
    # Iterate over all .npy files in the current directory, excluding the k-space file itself
    for filename in os.listdir(current_dir):
        if filename.lower().endswith('.npy') and filename != 'fid_20250418_163142.npy':
            mask_path = os.path.join(current_dir, filename)
            try:
                mask = np.load(mask_path)
                print(f"Processing file: {mask_path}")
            except Exception as e:
                print(f"Error: failed to load file {mask_path}: {e}")
                continue
            # Check that kspace_full and mask have matching shapes
            if kspace_full.shape != mask.shape:
                print(f"Warning: shape of {filename} does not match the k-space data, skipping.")
                continue
            # Sample k-space
            kspace_sampled = kspace_full * mask
            # Reconstruct the image
            try:
                img_reconstructed = split_bregman_mri_reconstruction(kspace_sampled, mask, lam, num_iters, step_size, tv_alpha)
                print(f"Reconstructed image: {filename}")
            except Exception as e:
                print(f"Error: failed to reconstruct {filename}: {e}")
                continue
            # Build the output PNG filename
            base_name = os.path.splitext(filename)[0]
            png_filename = f"{base_name}_split_bregman_reconstructed_fid_20250417_103329.png"
            png_path = os.path.join(current_dir, png_filename)
            plt.imshow(img_reconstructed, cmap='gray')  # display as grayscale
            plt.title('Reconstructed MRI Image')
            plt.show()
            plt.imsave(png_path, np.abs(img_reconstructed), cmap='gray')


if __name__ == "__main__":
    main()
import numpy as np
import matplotlib.pyplot as plt
import pywt
from scipy.fft import fft2, ifft2, fftshift, ifftshift
import os


def fft2c(img):
    """Centered 2D FFT."""
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(img)))


def ifft2c(kspace):
    """Centered 2D IFFT."""
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace)))


def soft_threshold(x, lam):
    """Soft-thresholding operator."""
    return np.sign(x) * np.maximum(np.abs(x) - lam, 0)


def total_variation(x, alpha):
    """Compute the total variation of an image."""
    dx = np.diff(x, axis=0)
    dy = np.diff(x, axis=1)
    return alpha * (np.sum(np.abs(dx)) + np.sum(np.abs(dy)))


def reconstruct_image(kspace_data):
    im = fftshift(ifft2(kspace_data, (256, 256)), axes=0)
    # Take the magnitude to obtain a real-valued image
    magnitude_image = np.abs(im)
    # Normalize the image to [0, 1]
    normalized_image = (magnitude_image - magnitude_image.min()) / (magnitude_image.max() - magnitude_image.min())
    return normalized_image


def ista_mri_reconstruction(kspace_sampled, mask, lam, num_iters=2000, step_size=0.05, tv_alpha=0.05):
    """
    MRI reconstruction with the Iterative Shrinkage-Thresholding Algorithm (ISTA)
    plus a total-variation constraint.
    :param kspace_sampled: sampled k-space data
    :param mask: sampling mask
    :param lam: regularization parameter
    :param num_iters: number of iterations
    :param step_size: step size
    :param tv_alpha: total-variation constraint strength
    :return: reconstructed image
    """
    # Initial reconstruction (zero filling)
    img_reconstructed = reconstruct_image(kspace_sampled)
    # Show the initial image
    plt.imshow(np.abs(img_reconstructed), cmap='gray')
    plt.title('Initial Reconstructed MRI Image')
    plt.show()
    plt.imsave('initial_reconstructed_image.png', np.abs(img_reconstructed), cmap='gray')
    for i in range(num_iters):
        # Data-consistency step
        kspace_reconstructed = fft2c(img_reconstructed)
        kspace_reconstructed = kspace_sampled * mask + kspace_reconstructed * (1 - mask)
        img_temp = reconstruct_image(kspace_reconstructed)
        # Transform the current image into the sparse domain (discrete wavelet transform, DWT)
        coeffs = pywt.wavedec2(img_temp, 'db4', level=3)
        new_coeffs = []
        for c in coeffs:
            if isinstance(c, tuple):
                c = tuple([soft_threshold(x, lam * step_size) for x in c])
            else:
                c = soft_threshold(c, lam * step_size)
            new_coeffs.append(c)
        # Transform the sparse coefficients back to the image domain
        img_reconstructed = pywt.waverec2(new_coeffs, 'db4')
        # Apply the total-variation constraint
        grad_tv = np.gradient(img_reconstructed)
        grad_tv = np.sqrt(grad_tv[0] ** 2 + grad_tv[1] ** 2)
        img_reconstructed = img_reconstructed - step_size * tv_alpha * grad_tv
        if (i + 1) % 10 == 0 or i == 0:
            print(f"Iteration {i + 1}/{num_iters}")
    return img_reconstructed


def main():
    # Use the current working directory
    current_dir = os.getcwd()
    # Load the fully sampled k-space data
    kspace_path = os.path.join(current_dir, 'fid_20250418_163142.npy')
    if not os.path.exists(kspace_path):
        print(f"Error: file not found: {kspace_path}")
        return
    try:
        kspace_full = np.load(kspace_path)
        print(f"Loaded k-space data: {kspace_path}")
    except Exception as e:
        print(f"Error: failed to load k-space data: {e}")
        return
    # ISTA parameters
    lam = 0.05         # regularization parameter; adjust as needed
    num_iters = 10000  # number of iterations
    step_size = 0.05   # step size; adjust as needed
    tv_alpha = 0.1     # total-variation constraint strength; adjust as needed
    # Iterate over all .npy files in the current directory, excluding the k-space file itself
    for filename in os.listdir(current_dir):
        if filename.lower().endswith('.npy') and filename != 'fid_20250418_163142.npy':
            mask_path = os.path.join(current_dir, filename)
            try:
                mask = np.load(mask_path)
                print(f"Processing file: {mask_path}")
            except Exception as e:
                print(f"Error: failed to load file {mask_path}: {e}")
                continue
            # Check that kspace_full and mask have matching shapes
            if kspace_full.shape != mask.shape:
                print(f"Warning: shape of {filename} does not match the k-space data, skipping.")
                continue
            # Sample k-space
            kspace_sampled = kspace_full * mask
            # Reconstruct the image
            try:
                img_reconstructed = ista_mri_reconstruction(kspace_sampled, mask, lam, num_iters, step_size, tv_alpha)
                print(f"Reconstructed image: {filename}")
            except Exception as e:
                print(f"Error: failed to reconstruct {filename}: {e}")
                continue
            # Build the output PNG filename
            base_name = os.path.splitext(filename)[0]
            png_filename = f"{base_name}_ista_reconstructed_fid_20250417_103329.png"
            png_path = os.path.join(current_dir, png_filename)
            plt.imshow(img_reconstructed, cmap='gray')  # display as grayscale
            plt.title('Reconstructed MRI Image')
            plt.show()
            plt.imsave(png_path, np.abs(img_reconstructed), cmap='gray')


if __name__ == "__main__":
    main()
'''
Author: zhuwanjie 2268677665@qq.com
Date: 2025-04-21 11:34:18
LastEditors: zhuwanjie 2268677665@qq.com
LastEditTime: 2025-04-30 09:56:59
FilePath: \unet-pytorch\demo.py
Description: These are the default settings. To customize, set `customMade` and open koroFileHeader to configure: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
'''
import os
import numpy as np
import cv2
from matplotlib import pyplot as plt
from scipy.fft import fft2, ifft2, fftshift, ifftshift


# Reconstruct an image from k-space data
def reconstruct_image(kspace_data):
    print("kspace_data shape:", kspace_data.shape)
    im = fftshift(ifft2(kspace_data, (256, 256)), axes=0)
    # Take the magnitude to obtain a real-valued image
    magnitude_image = np.abs(im)
    # Normalize the image to [0, 1]
    normalized_image = (magnitude_image - magnitude_image.min()) / (magnitude_image.max() - magnitude_image.min())
    # Convert to 8-bit unsigned integers
    uint8_image = (normalized_image * 255).astype(np.uint8)
    # Convert to an RGB image
    rgb_image = cv2.cvtColor(uint8_image, cv2.COLOR_GRAY2RGB)
    return rgb_image


def saveImg(path, img):
    fig = plt.figure(figsize=(img.shape[1] / 100, img.shape[0] / 100), dpi=130)
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.savefig(path, bbox_inches='tight', pad_inches=0, transparent=True)
    plt.close(fig)


def process_kspace_folder(kspace_folder_path, output_folder_path):
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)
    file_names = [f for f in os.listdir(kspace_folder_path) if f.endswith('.npy')]
    for file_name in file_names:
        kspace_file_path = os.path.join(kspace_folder_path, file_name)
        kspace_data = np.load(kspace_file_path)
        # Reconstruct the image
        reconstructed_image = reconstruct_image(kspace_data)
        # Save the reconstructed image
        output_file_name = os.path.splitext(file_name)[0] + '_reconstructed.png'
        output_file_path = os.path.join(output_folder_path, output_file_name)
        saveImg(output_file_path, reconstructed_image)


if __name__ == "__main__":
    kspace_folder_path = r'D:\Users\Desktop\fid'  # replace with the actual k-space data folder
    output_folder_path = r'D:\Users\Desktop\img'  # replace with the actual output folder
    process_kspace_folder(kspace_folder_path, output_folder_path)
import os

from PIL import Image
from tqdm import tqdm

from unet import Unet
from utils.utils_metrics import compute_mIoU, show_results

'''
Notes on metric evaluation:
1. The images this file generates are grayscale. Their values are small, so they look
   nearly all black when viewed as JPGs; this is normal.
2. This file computes the miou of the validation set. This repository uses the test set
   as the validation set and does not split out a separate test set.
3. Only models trained on VOC-format data can use this file to compute miou.
'''
if __name__ == "__main__":
    #---------------------------------------------------------------------------#
    #   miou_mode selects what this file computes when it runs
    #   miou_mode 0: the whole miou pipeline, from predictions to miou.
    #   miou_mode 1: only produce the predictions.
    #   miou_mode 2: only compute the miou.
    #---------------------------------------------------------------------------#
    miou_mode       = 0
    #------------------------------#
    #   Number of classes + 1, e.g. 2 + 1
    #------------------------------#
    num_classes     = 21
    #--------------------------------------------#
    #   Categories to distinguish, same as in json_to_dataset
    #--------------------------------------------#
    name_classes    = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    # name_classes  = ["_background_", "cat", "dog"]
    #-------------------------------------------------------#
    #   Points to the folder containing the VOC dataset
    #   Defaults to the VOC dataset in the repository root
    #-------------------------------------------------------#
    VOCdevkit_path  = 'VOCdevkit'

    image_ids       = open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"), 'r').read().splitlines()
    gt_dir          = os.path.join(VOCdevkit_path, "VOC2007/SegmentationClass/")
    miou_out_path   = "miou_out"
    pred_dir        = os.path.join(miou_out_path, 'detection-results')

    if miou_mode == 0 or miou_mode == 1:
        if not os.path.exists(pred_dir):
            os.makedirs(pred_dir)

        print("Load model.")
        unet = Unet()
        print("Load model done.")

        print("Get predict result.")
        for image_id in tqdm(image_ids):
            image_path = os.path.join(VOCdevkit_path, "VOC2007/JPEGImages/" + image_id + ".jpg")
            image = Image.open(image_path)
            image = unet.get_miou_png(image)
            image.save(os.path.join(pred_dir, image_id + ".png"))
        print("Get predict result done.")

    if miou_mode == 0 or miou_mode == 2:
        print("Get miou.")
        hist, IoUs, PA_Recall, Precision = compute_mIoU(gt_dir, pred_dir, image_ids, num_classes, name_classes)  # run the mIoU computation
        print("Get miou done.")
        show_results(miou_out_path, hist, IoUs, PA_Recall, Precision, name_classes)
import cv2
import os
import re


def natural_sort_key(s):
    return [int(text) if text.isdigit() else text.lower() for text in re.split('([0-9]+)', s)]


def images_to_video(image_folder, video_name, fps=24, target_width=640, target_height=480):
    # Support several image formats
    supported_extensions = ('.png', '.jpg', '.jpeg', '.bmp')
    images = [img for img in os.listdir(image_folder) if img.endswith(supported_extensions)]
    images.sort(key=natural_sort_key)
    if not images:
        print("No image files found.")
        return
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(video_name, fourcc, fps, (target_width, target_height))
    for image in images:
        img_path = os.path.join(image_folder, image)
        try:
            frame = cv2.imread(img_path)
            if frame is None:
                print(f"Failed to read image: {img_path}")
                continue
            # Resize the image to the target frame size
            resized_frame = cv2.resize(frame, (target_width, target_height))
            video.write(resized_frame)
        except Exception as e:
            print(f"Error while processing image {img_path}: {e}")
    video.release()
    print(f"Video {video_name} has been generated.")


if __name__ == "__main__":
    image_folder = r'D:\Users\Desktop\item\unet-pytorch\img'  # replace with the actual image folder
    video_name = 'output_video.mp4'
    fps = 1
    # Target frame size
    target_width = 640
    target_height = 640
    images_to_video(image_folder, video_name, fps, target_width, target_height)
img/2.jpg

13.1 KB
