Loading Darknet model error, (-215:Assertion failed) separator_index < line.size() in function 'cv::dnn::darknet::ReadDarknetFromCfgStream'

import cv2

weightsPath = 'D:/yolo/darknet-master/build/darknet/x64/backup/yolov3-voc_last-3300.weights'
configPath = 'D:/yolo/darknet-master/build/darknet/x64/mydata/yolov3-voc.cfg'
labelsPath = 'D:/yolo/darknet-master/build/darknet/x64/mydata/voc.names'
imgPath = 'D:/yolo/darknet-master/build/darknet/x64/2022521066-561.bmp'

net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
cv2.error: OpenCV(4.5.5) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\darknet\darknet_io.cpp:660: error: (-215:Assertion failed) separator_index < line.size() in function 'cv::dnn::darknet::ReadDarknetFromCfgStream'

Could you help me?

can you paste your cfg file here?
looks like something in there is broken

[net]
# Testing
# batch=1
# subdivisions=1
Training
batch=64
subdivisions=16
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.001
burn_in=1000
max_batches = 50200
policy=steps
steps=40000,45000
scales=.1,.1



[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

# Downsample

[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

######################

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear

[yolo]
mask = 6,7,8
anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
classes=1
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1

[route]
layers = -4

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 61



[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear

[yolo]
mask = 3,4,5
anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
classes=1
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1

[route]
layers = -4

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 36



[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear

[yolo]
mask = 0,1,2
anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
classes=1
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
random=1

OK, please help me, thanks

1 Like

well, i could reproduce your problem, the cfg is indeed broken.
please comment out Training on line 5 of the cfg:

# Training

(it's neither a key=value pair nor a [section] header, so the parser never finds an '=' separator on that line, which is exactly the assertion that fails)
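as a side note, here is a minimal sketch (my own helper, not part of OpenCV) that flags such lines before calling readNetFromDarknet; it mirrors the parser's rule that every non-blank, non-comment line must be either a [section] header or a key=value pair. The filename is assumed to be your yolov3-voc.cfg:

# cfg lint sketch: list lines that OpenCV's Darknet parser would reject.
# Assumes the parser accepts [section] headers, key=value pairs,
# '#' comments, and blank lines; anything else trips the assertion.
def lint_darknet_cfg(path):
    problems = []
    with open(path, encoding="utf-8") as f:
        for lineno, raw in enumerate(f, start=1):
            line = raw.strip()
            if not line or line.startswith("#"):
                continue  # blank or comment line: fine
            if line.startswith("[") and line.endswith("]"):
                continue  # section header like [net]: fine
            if "=" not in line:
                problems.append((lineno, line))  # no '=' separator: parser would assert here
    return problems

for lineno, line in lint_darknet_cfg("yolov3-voc.cfg"):
    print(f"cfg line {lineno}: {line!r} is neither a [section] nor key=value")

run against the cfg you pasted, it should report the bare Training line and nothing else.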

ok, the problem has been solved, thank you

cv2.error: OpenCV(4.6.0) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\darknet\darknet_io.cpp:660: error: (-215:Assertion failed) separator_index < line.size() in function 'cv::dnn::darknet::ReadDarknetFromCfgStream'

I am getting this error after commenting out the Training part

show what you have, please

I am also getting the same error
cv2.error: OpenCV(4.6.0) /root/opencv/modules/dnn/src/darknet/darknet_io.cpp:660: error: (-215:Assertion failed) separator_index < line.size() in function 'ReadDarknetFromCfgStream'

[net]
# Testing
batch=256
subdivisions=8

#batch=1
#subdivisions=1

width=544
height=288
channels=3


angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.0005
burn_in=2000
max_batches = 200000
policy=steps
steps=50000,100000,150000
scales=.1,.1,.1

#burn_in=1000
#learning_rate=0.01
#policy=poly
#power=4
#max_batches=500200
momentum=0.9
decay=0.0005


##########################################################################
[convolutional]
batch_normalize=1
filters=24
size=3
stride=1
pad=1
activation=leaky


# Downsample
[convolutional]
batch_normalize=1
filters=36
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=24
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=36
size=3
stride=1
pad=1
activation=leaky

# 4 
[shortcut]
from=-3
activation=linear

# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky

# 6
[convolutional]
batch_normalize=1
filters=36
size=1
stride=1
pad=1
activation=leaky

# 7
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

# 8
[shortcut]
from=-3
activation=linear

# 9
[convolutional]
batch_normalize=1
filters=36
size=1
stride=1
pad=1
activation=leaky

# 10
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

# 11
[shortcut]
from=-3
activation=linear

# Downsample
# 12
[convolutional]
batch_normalize=1
filters=96
size=3
stride=2
pad=1
activation=leaky

# 13
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

# 14
[convolutional]
batch_normalize=1
filters=96
size=3
stride=1
pad=1
activation=leaky

# 15
[shortcut]
from=-3
activation=linear

# 16
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

# 17
[convolutional]
batch_normalize=1
filters=96
size=3
stride=1
pad=1
activation=leaky

# 18
[shortcut]
from=-3
activation=linear

# 19
# [convolutional]
# batch_normalize=1
# filters=64
# size=1
# stride=1
# pad=1
# activation=leaky

# 20
# [convolutional]
# batch_normalize=1
# filters=96
# size=3
# stride=1
# pad=1
# activation=leaky

# 21
# [shortcut]
# from=-3
# activation=linear

# 34
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

# 35
[convolutional]
batch_normalize=1
filters=96
size=3
stride=1
pad=1
activation=leaky

# 36
[shortcut]
from=-3
activation=linear

# Downsample
# 37
[convolutional]
batch_normalize=1
filters=160
size=3
stride=2
pad=1
activation=leaky

# 38
[convolutional]
batch_normalize=1
filters=96
size=1
stride=1
pad=1
activation=leaky

# 39
[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=leaky

# 40
[shortcut]
from=-3
activation=linear

# 56
[convolutional]
batch_normalize=1
filters=96
size=1
stride=1
pad=1
activation=leaky

# 57
[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=leaky

# 58
[shortcut]
from=-3
activation=linear

# 59
[convolutional]
batch_normalize=1
filters=96
size=1
stride=1
pad=1
activation=leaky

# 60
[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=leaky

# 61
[shortcut]
from=-3
activation=linear

# Downsample
# 62
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky

# 63
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=leaky

# 64
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

# 65
[shortcut]
from=-3
activation=linear


# 72
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=leaky

# 73
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

# 74
[shortcut]
from=-3
activation=linear

######################

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=leaky

### SPP ###
[maxpool]
stride=1
size=3

[route]
layers=-2

[maxpool]
stride=1
size=5

[route]
layers=-4

[maxpool]
stride=1
size=7

[route]
layers=-1,-3,-5,-6

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky



[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear


[yolo]
mask = 6,7,8
anchors =   6, 11,   7, 20,  11, 15,  11, 29,  17, 36,  22, 54,  33, 72,  48,108,  85,149
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1


[route]
layers = -4

[convolutional]
batch_normalize=1
filters=96
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 28



[convolutional]
batch_normalize=1
filters=96
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=160
activation=leaky



[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear


[yolo]
mask = 3,4,5
anchors =   6, 11,   7, 20,  11, 15,  11, 29,  17, 36,  22, 54,  33, 72,  48,108,  85,149
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1



[route]
layers = -4

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 18



[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=96
activation=leaky



[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear


[yolo]
mask = 0,1,2
anchors =   6, 11,   7, 20,  11, 15,  11, 29,  17, 36,  22, 54,  33, 72,  48,108,  85,149
classes=1
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
