Loading TOC...


cntk:learning-rate-schedule-from-constant(
   $learning-rate as xs:double,
   [$minibatch-size as xs:unsignedLong]
) as cntk:schedule


Returns a learning rate schedule, where the learning rate is fixed throughout the whole training process.

$learning-rate The learning rate.
$minibatch-size The minibatch size. Defaults to a sentinel value which means ignored. This is a "best-effort" parameter: there is no guarantee that the size of every minibatch is exactly $minibatch-size.


xquery version "1.0-ml";

(: Example: train one minibatch with an SGD learner that uses a
   constant learning-rate schedule (rate 0.1). :)
let $feature-dims := (2, 2)
let $label-dims := (2, 2)

(: Feature input variable plus a batch of training samples. :)
let $feature-var := cntk:input-variable(cntk:shape($feature-dims), "float", fn:false(), fn:false(), "feature")
let $feature-batch := json:to-array((2.2, 3.5, 5.1, 5.7, 1.3, 5.5, 3.5, 2.4))
let $feature-value := cntk:batch(cntk:shape($feature-dims), $feature-batch, cntk:cpu(), "float")

(: Label input variable plus the matching label batch. :)
let $label-var := cntk:input-variable(cntk:shape($label-dims), "float", fn:false(), fn:false(), "labels")
let $label-batch := json:to-array((1, 0, 0, 1, 0, 1, 1, 0))
let $label-value := cntk:batch(cntk:shape($label-dims), $label-batch, cntk:cpu(), "float")

(: Model: a single learnable weight matrix applied via cntk:times. :)
let $weights := cntk:parameter(cntk:shape($feature-dims), "float", cntk:glorot-uniform-initializer(), cntk:cpu(), "parameter")
let $model := cntk:times($feature-var, $weights, 1, -1)

(: SGD learner over the trainable weights, with the constant schedule. :)
let $learner := cntk:sgd-learner(($weights), cntk:learning-rate-schedule-from-constant(0.1))
let $loss := cntk:lambda-rank($model, $label-var, $label-var)
(: NOTE(review): $metric is bound but never passed to cntk:trainer —
   confirm whether it was meant to be supplied as an evaluation function. :)
let $metric := cntk:classification-error($model, $label-var, 1, cntk:axis(-1), "metric")
let $trainer := cntk:trainer($model, $learner, $loss)

(: Pair each input variable with its batch value and run one training step. :)
let $feature-pair := json:to-array(($feature-var, $feature-value))
let $label-pair := json:to-array(($label-var, $label-value))
let $minibatch := json:to-array(($feature-pair, $label-pair))
return cntk:train-minibatch($trainer, $minibatch, fn:false())

Stack Overflow: Get the most useful answers to questions from the MarkLogic community, or ask your own question.