
Training and testing yolo

2016-12-20 17:46
Official site: http://pjreddie.com/yolo/

Paper: http://arxiv.org/abs/1506.02640

Source code: https://github.com/pjreddie/darknet.git

1 Download the source code

2 Configure darknet

When configuring darknet with OpenCV support: I had OpenCV 3.0.0 installed and could not get darknet to build against it (reportedly a 2.x release is expected), so I could not test on live images from a webcam.

See the linked discussion of this issue.

Note:

If this error appears:

/bin/sh: 1: nvcc: not found
make: *** [obj/convolutional_kernels.o] Error 127
Fix:

# edit the Makefile
NVCC = /usr/local/cuda-7.5/bin/nvcc

3 Prepare the dataset

(1) Obtain the dataset: I got the test dataset from here; it is really a two-class detection problem, with stopsign and yeildsign as the targets.

(2) Annotate the dataset with BBox-Label-Tool, then modify its code.

(3) Modify the BBox-Label-Tool code:



Images contains the images to be annotated, split into per-class folders named 001, 002, 003, and so on, one folder per class. If your classes have names such as cat or bird, rename the folders to 001, 002, ... before labeling, and rename them back when annotation is finished.
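As a convenience, here is a minimal sketch of that renaming step (the Images/ path and the class order are assumptions, not taken from the post):

# Sketch: rename named class folders to the numeric layout 001, 002, ...
# Adjust the Images/ path and class list to your own dataset.
import os

classes = ["stopsign", "yeildsign"]            # order determines the numeric id
for i, name in enumerate(classes, start=1):
    src = os.path.join("Images", name)
    dst = os.path.join("Images", "%03d" % i)   # e.g. Images/001, Images/002
    if os.path.isdir(src):
        os.rename(src, dst)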

For the detailed annotation procedure, see http://blog.csdn.net/qq_30401249/article/details/51504816

(4) Generate the coordinate/class .txt files and the image path list.

After annotation, the label folder contains one .txt file per image, with the following structure:

number_of_boxes

box1_x1 box1_y1 box1_x2 box1_y2

box2_x1 box2_y1 box2_x2 box2_y2

……

Then use darknet/scripts/convert.py to convert these into the format darknet needs:

class_id box1_x_center_ratio box1_y_center_ratio box1_width_ratio box1_height_ratio

class_id box2_x_center_ratio box2_y_center_ratio box2_width_ratio box2_height_ratio

……

In convert.py, the class settings have to be adjusted for each class of labels:

""" Configure Paths"""
mypath = "labels/stopsign_original/" # 改
outpath = "labels/stopsign/" #改

cls = "stopsign" # 改
if cls not in classes:
exit(0)
# 删除cls_id = classes.index(cls)
# 改成如下
cls_id = 1 # 根据类别不同,改成不用的类标,与文件夹对应

wd = getcwd()
list_file = open('%s/%s_list.txt'%(wd, cls), 'w') # 存储图片绝对位置信息
For example:

Before conversion:

2

61 90 72 103

198 5 243 54

After conversion:

0 0.123552123552 0.559278350515 0.0926640926641 0.10824742268

0 0.743243243243 0.585051546392 0.0579150579151 0.0773195876289
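The ratios come from dividing by the image width and height. A minimal sketch of the conversion convert.py performs, assuming the raw boxes are corner coordinates (xmin ymin xmax ymax) and the image size is known; the function name and the 500x375 size below are only illustrative:

# Pixel box (xmin, ymin, xmax, ymax) -> darknet (x_center, y_center, w, h),
# all expressed as fractions of the image width/height.
def convert_box(img_w, img_h, box):
    xmin, ymin, xmax, ymax = box
    x = (xmin + xmax) / 2.0 / img_w    # normalized box center x
    y = (ymin + ymax) / 2.0 / img_h    # normalized box center y
    w = (xmax - xmin) / float(img_w)   # normalized box width
    h = (ymax - ymin) / float(img_h)   # normalized box height
    return x, y, w, h

# usage with a hypothetical 500x375 image and the first raw box above
x, y, w, h = convert_box(500, 375, (61, 90, 72, 103))
print(0, x, y, w, h)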

Collect the generated per-class image lists into a single training list. For example, with the two class lists stopsign_listing.txt and yeildsign_listing.txt:

cat stopsign_listing.txt yeildsign_listing.txt > train.txt

Then put train.txt under ./scripts/, because src/yolo.c references train.txt:

void train_yolo(char *cfgfile, char *weightfile)
{
    char *train_images = "path/to/scripts/train.txt";
    char *backup_directory = "/path/to/backup/";   // use an absolute path
}
(5) Generate the label images

Open ./data/labels/make_labels.py

and add the labels that need to be generated. Note that the label filenames stopsign.png and yeildsign.png must match the names of the per-class subfolders under the images folder (the images) and the labels folder (the box annotations).
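The label PNGs are simply small images of the class names that darknet draws next to detections. A Pillow-based sketch of generating them (this is not the stock make_labels.py; the class list, image size, and default font are assumptions):

# Render one PNG per class name into data/labels/ (requires Pillow).
from PIL import Image, ImageDraw, ImageFont

classes = ["stopsign", "yeildsign"]      # must match the class/folder names
font = ImageFont.load_default()

for name in classes:
    img = Image.new("RGB", (200, 30), "black")
    ImageDraw.Draw(img).text((5, 8), name, fill="white", font=font)
    img.save("data/labels/%s.png" % name)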

The corresponding code in darknet/src/yolo.c is:

void run_yolo(int argc, char **argv)
{
    int i;
    for(i = 0; i < 20; ++i){
        char buff[256];
        sprintf(buff, "data/labels/%s.png", voc_names[i]);
        voc_labels[i] = load_image_color(buff, 0, 0);
    }
}

4 Modify the code

The files that need changes:

darknet/src/yolo.c

darknet/src/yolo_kernels.cu

darknet/cfg/yolo-tiny.cfg   # using yolo-tiny as the example

yolo.c

Modify the paths:

void train_yolo(char *cfgfile, char *weightfile)
{
    char *train_images = "path/to/scripts/train.txt";
    char *backup_directory = "/path/to/backup/";
    // use an absolute path for backup_directory, otherwise you will see errors like:
    //   Saving weights to /backup/yolo-tiny-2class_100.weights
    //   Couldn't open file: /backup/yolo-tiny-2class_100.weights
    srand(time(0));
    data_seed = time(0);
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    int imgs = net.batch*net.subdivisions;
    int i = *net.seen/imgs;
    data train, buffer;

    layer l = net.layers[net.n - 1];

    int side = l.side;
    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = {0};
    args.w = net.w;
    args.h = net.h;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = side;
    args.d = &buffer;
    args.type = REGION_DATA;

    pthread_t load_thread = load_data_in_thread(args);
    clock_t time;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net.max_batches){
        i += 1;
        time=clock();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data_in_thread(args);

        printf("Loaded: %lf seconds\n", sec(clock()-time));

        time=clock();
        float loss = train_network(net, train);
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
        if(i%1000==0 || (i < 1000 && i%100 == 0)){
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
Modify the classes:

char *voc_names[] = {"stopsign", "yeildsign"};
image voc_labels[2];
......
void test_yolo(char *cfgfile, char *weightfile, char *filename, float thresh)
{
    draw_detections(im, l.side*l.side*l.n, thresh, boxes, probs, voc_names, voc_labels, 2);
}
......
void run_yolo(int argc, char **argv)
{
    int i;
    for(i = 0; i < 2; ++i){
        char buff[256];
        sprintf(buff, "data/labels/%s.png", voc_names[i]);
        voc_labels[i] = load_image_color(buff, 0, 0);
    }
}
yolo_kernels.cu

void *detect_in_thread(void *ptr)
{
    float nms = .4;

    detection_layer l = net.layers[net.n-1];
    float *X = det_s.data;
    float *predictions = network_predict(net, X);
    free_image(det_s);
    convert_yolo_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0);
    if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms);
    printf("\033[2J");
    printf("\033[1;1H");
    printf("\nFPS:%.0f\n",fps);
    printf("Objects:\n\n");
    draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, 20); // change 20 -> 2
    return 0;
}
yolo-tiny.cfg

[connected]
output= 1470          # S x S x (B*5 + class_num)
activation=linear

[detection]
classes=20            # change to your actual class count
coords=4              # the 4 box coordinates
rescore=1             # confidence rescoring
side=7                # grid size; a finer grid may detect more accurately
num=2
softmax=0
sqrt=1
jitter=.2

object_scale=1
noobject_scale=.5
class_scale=1
coord_scale=5
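As the comment on output indicates, the connected-layer output size is tied to the detection settings: output = side x side x (num x 5 + classes). A quick check of the arithmetic (assuming that standard relation):

# output = side*side*(num*5 + classes)
side, num = 7, 2
print(side * side * (num * 5 + 20))   # 1470 -> the stock 20-class cfg
print(side * side * (num * 5 + 2))    # 588  -> the 2-class case in this post

So when classes is changed to 2, the output value of the preceding [connected] layer should be changed to 588 to stay consistent.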

5 pre-train

The pre-trained weights used by yolo are .conv.weights files; depending on the model, the existing full weights have to be converted.

The three models correspond to different weights:

yolo.cfg -> extraction.conv.weights

yolo-small.cfg -> strided.conv.weights

yolo-tiny.cfg -> darknet.conv.weights

./darknet partial cfg/extraction.cfg path/to/extraction.weights extraction.conv.weights 25   # usage: ./darknet partial <network cfg> <path to existing weights> <path for the generated .conv.weights> <layer cutoff>

./darknet partial cfg/darknet.cfg path/to/darknet.weights path/to/darknet.conv.weights 14
Download links:
extraction.conv.weights

extraction.weights

strided.weights

darknet.weights

darknet.conv.weights

6 training

$ make   # rebuild after the code changes
$ ./darknet yolo train cfg/yolo-tiny.cfg path/to/darknet.conv.weights

7 validation

./darknet yolo valid <cfgfile> <weights>

It will output text files containing all the bounding boxes predicted for each image.

The list of images it will generate bounding boxes for is defined in src/yolo.c, so you can change that filename and recompile yolo.