cntk:classification-error(
   $output_vector as cntk:variable,
   $target_vector as cntk:variable,
   $top-n as xs:unsignedLong,
   $axis as cntk:axis,
   [$name as xs:string]
) as cntk:function


This operation computes the classification error. It finds the index of the highest value in the output_vector and compares it to the actual ground truth label (the index of the hot bit in the target vector). The result is a scalar (i.e., one by one matrix). This is often used as an evaluation criterion. It cannot be used as a training criterion though since the gradient is not defined for it.

$output_vector The output values from the network.
$target_vector It is one-hot vector where the hot bit corresponds to the label index.
$top-n The number of top (highest-valued) entries of the output_vector to consider; the prediction counts as correct if the label index is among the top-n values.
$axis Axis along which the classification error will be computed.
$name The name of the function instance in the network.


  xquery version "1.0-ml";

  let $input-dims := 2
  let $num-classes := 2

  (: Feature input: 4 samples of 2 float features each, batched on the CPU. :)
  let $input-variable := cntk:input-variable(cntk:shape(($input-dims)), "float", fn:false(), fn:false(), "feature")
  let $training-data := json:to-array((2.2,3.5,5.1,5.7,1.3,5.5,3.5,2.4))
  let $input-value := cntk:batch(cntk:shape(($input-dims)), $training-data, cntk:cpu(), "float")

  (: One-hot labels for the 4 samples (2 classes each). :)
  let $labels-variable := cntk:input-variable(cntk:shape(($num-classes)), "float", fn:false(), fn:false(), "labels")
  let $labels := json:to-array((1,0,0,1,0,1,1,0))
  let $labels-value := cntk:batch(cntk:shape(($num-classes)), $labels, cntk:cpu(), "float")

  (: Simple linear model: input times a learnable parameter W. :)
  let $W := cntk:parameter(cntk:shape(($input-dims)), "float", cntk:glorot-uniform-initializer(), cntk:cpu(), "parameter")
  let $model := cntk:times($input-variable, $W, 1, -1)

  (: SGD learner with L1/L2 regularization, noise injection, and gradient clipping. :)
  let $learner := cntk:sgd-learner(($W), cntk:learning-rate-schedule-from-constant(0.01),
                  map:map() => map:with("l1-regularization-weight", 0.1)
                            => map:with("l2-regularization-weight", 0.2)
                            => map:with("gaussian-noise-injection-std-dev", cntk:learning-rate-schedule-from-constant(0.01, 100))
                            => map:with("gradient-clipping-threshold-per-sample", 0.3)
                            => map:with("gradient-clipping-with-truncation", fn:false()))

  (: Binary cross-entropy as the training criterion; classification error
     (top-1, along the default axis) as the evaluation metric. :)
  let $loss := cntk:binary-cross-entropy($model, $labels-variable, "loss_func")
  let $metric := cntk:classification-error($model, $labels-variable, 1, cntk:axis(-1), "metric")
  let $trainer := cntk:trainer($model, ($learner), $loss, $metric)

  (: Minibatch: (variable, value) pairs for features and labels. :)
  let $input-pair := json:to-array(($input-variable, $input-value))
  let $labels-pair := json:to-array(($labels-variable, $labels-value))
  let $minibatch := json:to-array(($input-pair, $labels-pair))

  (: Run five training iterations and report per-minibatch statistics.
     Fixed from the original: the string literals in the return sequence were
     unterminated and the sequence was never closed; also dropped the
     xdmp:type() wrapper so the loss VALUE (not its type name) is printed. :)
  for $i in (1 to 5)
    let $_ := cntk:train-minibatch($trainer, $minibatch, fn:false())
    return ("loss-average: ", cntk:previous-minibatch-loss-average($trainer),
            "evaluation-average: ", cntk:previous-minibatch-evaluation-average($trainer),
            "sample-count: ", cntk:previous-minibatch-sample-count($trainer),
            "total-sample: ", cntk:total-number-of-sample-seen($trainer))


Stack Overflow: Get the most useful answers to questions from the MarkLogic community, or ask your own question.