# initial value for learning rate
learning-rate=0.3

# reward factor
reward-factor=0.2

# penalty factor
penalty-factor=0.6

# coefficient limiting the learning of ones
lambda-factor=0.0

# regularization parameter
regularization=0.0

# maximum number of iterations per single training
iterations=1000

# initial number of hidden neurons
hidden-neurons=1

# number of initializations for each training
number-of-networks=10

# growing strategy: 1 - add new node after each training, 0 - static network (MLP)
network-architecture=1

# initial slope of transfer functions
slope=2.0

# slope adaptation: 0 - fixed, 1 - gradient descent, 2 - gradient descent with varying step
slope-learning=2

# learning strategy (stop criterion):
# 1 - learning stops when mean value of error over last 'average' iterations is not greater than 0.1
# 2 - stop when average training error increases
learning-strategy=1

# size of iteration window used to calculate mean error over last 'average' iterations
average=50

# have no influence on learning (required for backward compatibility)
force-zero=0
force-one=0
end-zero=0

# dimension of generated boolean functions
#dim=1

# number of crossvalidation folds
crossvalidation-folds=10

# CV strategy: 0 - stratified, 1 - random
crossvalidation-method=0

# number of repetitions of training
tests-number=10

# NETWORK ARCHITECTURE
# neuron types in each layer: 0 linear, 1 sigmoid, 2 ssigmoid (two sigmoids), 3 bicentral
input=0
hidden=3
output=1

# weight initialization method: 0 - random, 1 - copy from last trained node
init-type=0


# number of neurons in hidden layer
hidden-neurons-count=1

# Data transformation :
# 0 - none, 1 - normalization, 2 - standardization, 3 - binarization
transformation=0
