Source code for PMML43Ext

#!/usr/bin/env python

#
# Generated Wed Aug 29 19:55:55 2018 by generateDS.py version 2.28a.
#
# Command line options:
#   ('--no-warnings', '')
#   ('--export', 'write literal etree')
#   ('--super', 'nyoka.pmml.PMML43ExtSuper')
#   ('--subclass-suffix', '')
#   ('-o', 'nyoka.pmml.PMML43ExtSuper.py')
#   ('-s', 'nyoka.pmml.PMML43Ext.py')
#   ('-b', 'behaviors2.xml')
#   ('-f', '')
#
# Command line arguments:
#   nyoka.pmml.PMML43Ext.xsd
#
# Command line:
#   /Users/pasha/Desktop/PMML/PMML43Ext/gds_local.py --no-warnings --export="write literal etree" --super="nyoka.pmml.PMML43ExtSuper" --subclass-suffix -o "nyoka.pmml.PMML43ExtSuper.py" -s "nyoka.pmml.PMML43Ext.py" -b "behaviors2.xml" -f nyoka.pmml.PMML43Ext.xsd
#
# Current working directory (os.getcwd()):
#   PMML43Ext
#

import sys
from lxml import etree as etree_

import PMML43ExtSuper as supermod

[docs]def parsexml_(infile, parser=None, **kwargs): if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. parser = etree_.ETCompatXMLParser(huge_tree=True) doc = etree_.parse(infile, parser=parser, **kwargs) return doc
# # Globals # ExternalEncoding = 'utf-8' # # Data representation classes #
[docs]class AssociationModel(supermod.AssociationModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, numberOfTransactions=None, maxNumberOfItemsPerTA=None, avgNumberOfItemsPerTA=None, minimumSupport=None, minimumConfidence=None, lengthLimit=None, numberOfItems=None, numberOfItemsets=None, numberOfRules=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, LocalTransformations=None, Item=None, Itemset=None, AssociationRule=None, ModelVerification=None, Extension=None): super(AssociationModel, self).__init__(modelName, functionName, algorithmName, numberOfTransactions, maxNumberOfItemsPerTA, avgNumberOfItemsPerTA, minimumSupport, minimumConfidence, lengthLimit, numberOfItems, numberOfItemsets, numberOfRules, isScorable, MiningSchema, Output, ModelStats, LocalTransformations, Item, Itemset, AssociationRule, ModelVerification, Extension, ) # # XMLBehaviors #
[docs] def set_Item(self, Item, *args): self.Item = Item self.numberOfItems = len(self.Item)
[docs] def set_Item_wrapper(self, Item, *args): result = self.set_Item(Item, *args) return result
[docs] def add_Item(self, value, *args): self.Item.append(value) self.numberOfItems = len(self.Item)
[docs] def add_Item_wrapper(self, value, *args): result = self.add_Item(value, *args) return result
[docs] def insert_Item_at(self, index, value, *args): self.Item.insert(index, value) self.numberOfItems = len(self.Item)
[docs] def insert_Item_at_wrapper(self, index, value, *args): result = self.insert_Item_at(index, value, *args) return result
[docs] def set_Itemset(self, Itemset, *args): self.Itemset = Itemset self.numberOfItemsets = len(self.Itemset)
[docs] def set_Itemset_wrapper(self, Itemset, *args): result = self.set_Itemset(Itemset, *args) return result
[docs] def add_Itemset(self, value, *args): self.Itemset.append(value) self.numberOfItemsets = len(self.Itemset)
[docs] def add_Itemset_wrapper(self, value, *args): result = self.add_Itemset(value, *args) return result
[docs] def insert_Itemset_at(self, index, value, *args): self.Itemset.insert(index, value) self.numberOfItemsets = len(self.Itemset)
[docs] def insert_Itemset_at_wrapper(self, index, value, *args): result = self.insert_Itemset_at(index, value, *args) return result
[docs] def set_AssociationRule(self, Rules, *args): pass
[docs] def set_AssociationRule_wrapper(self, Rules, *args): result = self.set_AssociationRule(Rules, *args) return result
[docs] def add_AssociationRule(self, value, *args): self.AssociationRule.append(value) self.numberOfRules = len(self.AssociationRule)
[docs] def add_AssociationRule_wrapper(self, value, *args): result = self.add_AssociationRule(value, *args) return result
[docs] def insert_AssociationRule_at(self, index, value, *args): self.AssociationRule.insert(index, value) self.numberOfRules = len(self.AssociationRule)
[docs] def insert_AssociationRule_at_wrapper(self, index, value, *args): result = self.insert_AssociationRule_at(index, value, *args) return result
supermod.AssociationModel.subclass = AssociationModel # end class AssociationModel
[docs]class Item(supermod.Item): def __init__(self, id=None, value=None, field=None, category=None, mappedValue=None, weight=None, Extension=None): super(Item, self).__init__(id, value, field, category, mappedValue, weight, Extension, )
# # XMLBehaviors # supermod.Item.subclass = Item # end class Item
[docs]class Itemset(supermod.Itemset): def __init__(self, id=None, support=None, numberOfItems=None, Extension=None, ItemRef=None): super(Itemset, self).__init__(id, support, numberOfItems, Extension, ItemRef, ) # # XMLBehaviors #
[docs] def set_ItemRef(self, ItemRef, *args): self.ItemRef = ItemRef self.numberOfItems = len(self.ItemRef)
[docs] def set_ItemRef_wrapper(self, ItemRef, *args): result = self.set_ItemRef(ItemRef, *args) return result
[docs] def add_ItemRef(self, value, *args): self.ItemRef.append(value) self.numberOfItems = len(self.ItemRef)
[docs] def add_ItemRef_wrapper(self, value, *args): result = self.add_ItemRef(value, *args) return result
[docs] def insert_ItemRef_at(self, index, value, *args): self.ItemRef.insert(index, value) self.numberOfItems = len(self.ItemRef)
[docs] def insert_ItemRef_at_wrapper(self, index, value, *args): result = self.insert_ItemRef_at(index, value, *args) return result
supermod.Itemset.subclass = Itemset # end class Itemset
[docs]class ItemRef(supermod.ItemRef): def __init__(self, itemRef=None, Extension=None): super(ItemRef, self).__init__(itemRef, Extension, )
# # XMLBehaviors # supermod.ItemRef.subclass = ItemRef # end class ItemRef
[docs]class AssociationRule(supermod.AssociationRule): def __init__(self, antecedent=None, consequent=None, support=None, confidence=None, lift=None, leverage=None, affinity=None, id=None, Extension=None): super(AssociationRule, self).__init__(antecedent, consequent, support, confidence, lift, leverage, affinity, id, Extension, )
# # XMLBehaviors # supermod.AssociationRule.subclass = AssociationRule # end class AssociationRule
[docs]class BaselineModel(supermod.BaselineModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, TestDistributions=None, ModelVerification=None, Extension=None): super(BaselineModel, self).__init__(modelName, functionName, algorithmName, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, TestDistributions, ModelVerification, Extension, )
# # XMLBehaviors # supermod.BaselineModel.subclass = BaselineModel # end class BaselineModel
[docs]class TestDistributions(supermod.TestDistributions): def __init__(self, field=None, testStatistic=None, resetValue='0.0', windowSize='0', weightField=None, normalizationScheme=None, Baseline=None, Alternate=None, Extension=None): super(TestDistributions, self).__init__(field, testStatistic, resetValue, windowSize, weightField, normalizationScheme, Baseline, Alternate, Extension, )
# # XMLBehaviors # supermod.TestDistributions.subclass = TestDistributions # end class TestDistributions
[docs]class Baseline(supermod.Baseline): def __init__(self, AnyDistribution=None, GaussianDistribution=None, PoissonDistribution=None, UniformDistribution=None, Extension=None, CountTable=None, NormalizedCountTable=None, FieldRef=None): super(Baseline, self).__init__(AnyDistribution, GaussianDistribution, PoissonDistribution, UniformDistribution, Extension, CountTable, NormalizedCountTable, FieldRef, )
# # XMLBehaviors # supermod.Baseline.subclass = Baseline # end class Baseline
[docs]class Alternate(supermod.Alternate): def __init__(self, AnyDistribution=None, GaussianDistribution=None, PoissonDistribution=None, UniformDistribution=None, Extension=None): super(Alternate, self).__init__(AnyDistribution, GaussianDistribution, PoissonDistribution, UniformDistribution, Extension, )
# # XMLBehaviors # supermod.Alternate.subclass = Alternate # end class Alternate
[docs]class AnyDistribution(supermod.AnyDistribution): def __init__(self, mean=None, variance=None, Extension=None): super(AnyDistribution, self).__init__(mean, variance, Extension, )
# # XMLBehaviors # supermod.AnyDistribution.subclass = AnyDistribution # end class AnyDistribution
[docs]class GaussianDistribution(supermod.GaussianDistribution): def __init__(self, mean=None, variance=None, Extension=None): super(GaussianDistribution, self).__init__(mean, variance, Extension, )
# # XMLBehaviors # supermod.GaussianDistribution.subclass = GaussianDistribution # end class GaussianDistribution
[docs]class PoissonDistribution(supermod.PoissonDistribution): def __init__(self, mean=None, Extension=None): super(PoissonDistribution, self).__init__(mean, Extension, )
# # XMLBehaviors # supermod.PoissonDistribution.subclass = PoissonDistribution # end class PoissonDistribution
[docs]class UniformDistribution(supermod.UniformDistribution): def __init__(self, lower=None, upper=None, Extension=None): super(UniformDistribution, self).__init__(lower, upper, Extension, )
# # XMLBehaviors # supermod.UniformDistribution.subclass = UniformDistribution # end class UniformDistribution
[docs]class COUNT_TABLE_TYPE(supermod.COUNT_TABLE_TYPE): def __init__(self, sample=None, Extension=None, FieldValue=None, FieldValueCount=None): super(COUNT_TABLE_TYPE, self).__init__(sample, Extension, FieldValue, FieldValueCount, )
# # XMLBehaviors # supermod.COUNT_TABLE_TYPE.subclass = COUNT_TABLE_TYPE # end class COUNT_TABLE_TYPE
[docs]class FieldValue(supermod.FieldValue): def __init__(self, field=None, value=None, Extension=None, FieldValue_member=None, FieldValueCount=None): super(FieldValue, self).__init__(field, value, Extension, FieldValue_member, FieldValueCount, )
# # XMLBehaviors # supermod.FieldValue.subclass = FieldValue # end class FieldValue
[docs]class FieldValueCount(supermod.FieldValueCount): def __init__(self, field=None, value=None, count=None, Extension=None): super(FieldValueCount, self).__init__(field, value, count, Extension, )
# # XMLBehaviors # supermod.FieldValueCount.subclass = FieldValueCount # end class FieldValueCount
[docs]class BayesianNetworkModel(supermod.BayesianNetworkModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, BayesianNetworkNodes=None, ModelVerification=None, Extension=None): super(BayesianNetworkModel, self).__init__(modelName, functionName, algorithmName, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, BayesianNetworkNodes, ModelVerification, Extension, )
# # XMLBehaviors # supermod.BayesianNetworkModel.subclass = BayesianNetworkModel # end class BayesianNetworkModel
[docs]class BayesianNetworkNodes(supermod.BayesianNetworkNodes): def __init__(self, Extension=None, DiscreteNode=None, ContinuousNode=None): super(BayesianNetworkNodes, self).__init__(Extension, DiscreteNode, ContinuousNode, )
# # XMLBehaviors # supermod.BayesianNetworkNodes.subclass = BayesianNetworkNodes # end class BayesianNetworkNodes
[docs]class DiscreteNode(supermod.DiscreteNode): def __init__(self, name=None, count=None, Extension=None, DerivedField=None, DiscreteConditionalProbability=None, ValueProbability=None): super(DiscreteNode, self).__init__(name, count, Extension, DerivedField, DiscreteConditionalProbability, ValueProbability, )
# # XMLBehaviors # supermod.DiscreteNode.subclass = DiscreteNode # end class DiscreteNode
[docs]class ContinuousNode(supermod.ContinuousNode): def __init__(self, name=None, count=None, Extension=None, DerivedField=None, ContinuousConditionalProbability=None, ContinuousDistribution=None): super(ContinuousNode, self).__init__(name, count, Extension, DerivedField, ContinuousConditionalProbability, ContinuousDistribution, )
# # XMLBehaviors # supermod.ContinuousNode.subclass = ContinuousNode # end class ContinuousNode
[docs]class DiscreteConditionalProbability(supermod.DiscreteConditionalProbability): def __init__(self, count=None, Extension=None, ParentValue=None, ValueProbability=None): super(DiscreteConditionalProbability, self).__init__(count, Extension, ParentValue, ValueProbability, )
# # XMLBehaviors # supermod.DiscreteConditionalProbability.subclass = DiscreteConditionalProbability # end class DiscreteConditionalProbability
[docs]class ParentValue(supermod.ParentValue): def __init__(self, parent=None, value=None, Extension=None): super(ParentValue, self).__init__(parent, value, Extension, )
# # XMLBehaviors # supermod.ParentValue.subclass = ParentValue # end class ParentValue
[docs]class ValueProbability(supermod.ValueProbability): def __init__(self, value=None, probability=None, Extension=None): super(ValueProbability, self).__init__(value, probability, Extension, )
# # XMLBehaviors # supermod.ValueProbability.subclass = ValueProbability # end class ValueProbability
[docs]class ContinuousConditionalProbability(supermod.ContinuousConditionalProbability): def __init__(self, count=None, Extension=None, ParentValue=None, ContinuousDistribution=None): super(ContinuousConditionalProbability, self).__init__(count, Extension, ParentValue, ContinuousDistribution, )
# # XMLBehaviors # supermod.ContinuousConditionalProbability.subclass = ContinuousConditionalProbability # end class ContinuousConditionalProbability
[docs]class ContinuousDistribution(supermod.ContinuousDistribution): def __init__(self, Extension=None, TriangularDistributionForBN=None, NormalDistributionForBN=None, LognormalDistributionForBN=None, UniformDistributionForBN=None): super(ContinuousDistribution, self).__init__(Extension, TriangularDistributionForBN, NormalDistributionForBN, LognormalDistributionForBN, UniformDistributionForBN, )
# # XMLBehaviors # supermod.ContinuousDistribution.subclass = ContinuousDistribution # end class ContinuousDistribution
[docs]class TriangularDistributionForBN(supermod.TriangularDistributionForBN): def __init__(self, Extension=None, Mean=None, Lower=None, Upper=None): super(TriangularDistributionForBN, self).__init__(Extension, Mean, Lower, Upper, )
# # XMLBehaviors # supermod.TriangularDistributionForBN.subclass = TriangularDistributionForBN # end class TriangularDistributionForBN
[docs]class NormalDistributionForBN(supermod.NormalDistributionForBN): def __init__(self, Extension=None, Mean=None, Variance=None): super(NormalDistributionForBN, self).__init__(Extension, Mean, Variance, )
# # XMLBehaviors # supermod.NormalDistributionForBN.subclass = NormalDistributionForBN # end class NormalDistributionForBN
[docs]class LognormalDistributionForBN(supermod.LognormalDistributionForBN): def __init__(self, Extension=None, Mean=None, Variance=None): super(LognormalDistributionForBN, self).__init__(Extension, Mean, Variance, )
# # XMLBehaviors # supermod.LognormalDistributionForBN.subclass = LognormalDistributionForBN # end class LognormalDistributionForBN
[docs]class UniformDistributionForBN(supermod.UniformDistributionForBN): def __init__(self, Extension=None, Lower=None, Upper=None): super(UniformDistributionForBN, self).__init__(Extension, Lower, Upper, )
# # XMLBehaviors # supermod.UniformDistributionForBN.subclass = UniformDistributionForBN # end class UniformDistributionForBN
[docs]class Mean(supermod.Mean): def __init__(self, Extension=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(Mean, self).__init__(Extension, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, )
# # XMLBehaviors # supermod.Mean.subclass = Mean # end class Mean
[docs]class Lower(supermod.Lower): def __init__(self, Extension=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(Lower, self).__init__(Extension, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, )
# # XMLBehaviors # supermod.Lower.subclass = Lower # end class Lower
[docs]class Upper(supermod.Upper): def __init__(self, Extension=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(Upper, self).__init__(Extension, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, )
# # XMLBehaviors # supermod.Upper.subclass = Upper # end class Upper
[docs]class Variance(supermod.Variance): def __init__(self, Extension=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(Variance, self).__init__(Extension, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, )
# # XMLBehaviors # supermod.Variance.subclass = Variance # end class Variance
[docs]class ClusteringModel(supermod.ClusteringModel): """ A cluster model basically consists of a set of clusters Parameters ---------- modelName: element identifies the model with a unique name in the context of the PMML file functionName: Stores what type of problems it is ex classification or regression algorithmName: Stores algorithm name used in the model modelClass: specifies whether the clusters are defined by center-vectors or whether they are defined by the statistics numberOfClusters: attribute must be equal to the number of Cluster elements in the ClusteringModel isScorable: The isScorable attribute indicates whether the model is valid for scoring MiningSchema: list the fields that have to be provided in order to apply the model Output: describes a set of result values that can be returned from a model ClusteringField: The correspondence between input fields and their coordinates is defined via ClusteringFields MissingValueWeights: used to adjust distance or similarity measures for missing data Cluster: Clusters are identified by an implicit 1-based index, indicating the position in which each cluster appears in the model ModelVerification: ModelVerification schema provides a dataset of model inputs and known results that can be used to verify accurate results are generated, regardless of the environment """ def __init__(self, modelName=None, functionName=None, algorithmName=None, modelClass=None, numberOfClusters=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, LocalTransformations=None, ComparisonMeasure=None, ClusteringField=None, MissingValueWeights=None, Cluster=None, ModelVerification=None, Extension=None): super(ClusteringModel, self).__init__(modelName, functionName, algorithmName, modelClass, numberOfClusters, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, LocalTransformations, ComparisonMeasure, ClusteringField, MissingValueWeights, Cluster, ModelVerification, Extension, ) # # XMLBehaviors #
[docs] def set_Cluster(self, Cluster, *args): self.Cluster = Cluster self.numberOfClusters = len(self.Cluster)
[docs] def set_Cluster_wrapper(self, Cluster, *args): result = self.set_Cluster(Cluster, *args) return result
[docs] def add_Cluster(self, value, *args): self.Cluster.append(value) self.numberOfClusters = len(self.Cluster)
[docs] def add_Cluster_wrapper(self, value, *args): result = self.add_Cluster(value, *args) return result
[docs] def insert_Cluster_at(self, index, value, *args): self.Cluster.insert(index, value) self.numberOfClusters = len(self.Cluster)
[docs] def insert_Cluster_at_wrapper(self, index, value, *args): result = self.insert_Cluster_at(index, value, *args) return result
supermod.ClusteringModel.subclass = ClusteringModel # end class ClusteringModel
[docs]class MissingValueWeights(supermod.MissingValueWeights): def __init__(self, Extension=None, Array=None): super(MissingValueWeights, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.MissingValueWeights.subclass = MissingValueWeights # end class MissingValueWeights
[docs]class Cluster(supermod.Cluster): """ defined by a vector of center coordinates. Some distance measure is used to determine the nearest center, that is the nearest cluster for a given input record Parameters ---------- id: it represents unique identification for cluster name: The name of a cluster is is not required to be unique and is returned as the predictedDisplayValue size: size is descriptive only (not used in predictions) and intended to capture the size of each cluster KohonenMap: The element KohonenMap is appropriate for clustering models that were produced by a Kohonen map algorithm Array: containing the center coordinates for the cluster Partition: A Partition contains statistics for a subset of records, for example it can describe the population in a cluster, each Partition describes the distribution per field """ def __init__(self, id=None, name=None, size=None, Extension=None, KohonenMap=None, Array=None, Partition=None, Covariances=None): super(Cluster, self).__init__(id, name, size, Extension, KohonenMap, Array, Partition, Covariances, )
# # XMLBehaviors # supermod.Cluster.subclass = Cluster # end class Cluster
[docs]class KohonenMap(supermod.KohonenMap): """ The element KohonenMap is appropriate for clustering models that were produced by a Kohonen map algorithm Parameters ---------- coord1 : Describe the position of the current cluster(cluster 1) in a map with up to three dimensions coord2 : Describe the position of the current cluster(cluster2) in a map with up to three dimensions coord3 : Describe the position of the current cluster(cluster3) in a map with up to three dimensions """ def __init__(self, coord1=None, coord2=None, coord3=None, Extension=None): super(KohonenMap, self).__init__(coord1, coord2, coord3, Extension, )
# # XMLBehaviors # supermod.KohonenMap.subclass = KohonenMap # end class KohonenMap
[docs]class Covariances(supermod.Covariances): """ Covariances Parameters ---------- Matrix: stores coordinate-by-coordinate variances (diagonal cells) and covariances (non-diagonal cells) """ def __init__(self, Extension=None, Matrix=None): super(Covariances, self).__init__(Extension, Matrix, )
# # XMLBehaviors # supermod.Covariances.subclass = Covariances # end class Covariances
[docs]class ClusteringField(supermod.ClusteringField): """ ClusteringField Parameters ---------- field: refers (by name) to a MiningField or to a DerivedField isCenterField : indicates whether the respective field is a center field fieldWeight : used in the comparison functions in order to compute the comparison measure similarityScale : the distance such that similarity becomes 0.5 compareFunction : is a function of taking two field values and a similarityScale to define similarity/distance. It can override the general specification of compareFunction in ComparisonMeasure """ def __init__(self, field=None, isCenterField='true', fieldWeight='1', similarityScale=None, compareFunction=None, Extension=None, Comparisons=None): super(ClusteringField, self).__init__(field, isCenterField, fieldWeight, similarityScale, compareFunction, Extension, Comparisons, )
# # XMLBehaviors # supermod.ClusteringField.subclass = ClusteringField # end class ClusteringField
[docs]class Comparisons(supermod.Comparisons): def __init__(self, Extension=None, Matrix=None): super(Comparisons, self).__init__(Extension, Matrix, )
# # XMLBehaviors # supermod.Comparisons.subclass = Comparisons # end class Comparisons
[docs]class ComparisonMeasure(supermod.ComparisonMeasure): """ Per ClusteringModel there is one aggregation function: depending on the attribute kind in ComparisonMeasure the aggregated value is optimal if it is 0 (for distance measure) or greater values indicate optimal fit (for similarity measure) """ def __init__(self, kind=None, compareFunction='absDiff', minimum=None, maximum=None, Extension=None, euclidean=None, squaredEuclidean=None, chebychev=None, cityBlock=None, minkowski=None, simpleMatching=None, jaccard=None, tanimoto=None, binarySimilarity=None): super(ComparisonMeasure, self).__init__(kind, compareFunction, minimum, maximum, Extension, euclidean, squaredEuclidean, chebychev, cityBlock, minkowski, simpleMatching, jaccard, tanimoto, binarySimilarity, )
# # XMLBehaviors # supermod.ComparisonMeasure.subclass = ComparisonMeasure # end class ComparisonMeasure
[docs]class euclidean(supermod.euclidean): def __init__(self, Extension=None): super(euclidean, self).__init__(Extension, )
# # XMLBehaviors # supermod.euclidean.subclass = euclidean # end class euclidean
[docs]class squaredEuclidean(supermod.squaredEuclidean): def __init__(self, Extension=None): super(squaredEuclidean, self).__init__(Extension, )
# # XMLBehaviors # supermod.squaredEuclidean.subclass = squaredEuclidean # end class squaredEuclidean
[docs]class cityBlock(supermod.cityBlock): def __init__(self, Extension=None): super(cityBlock, self).__init__(Extension, )
# # XMLBehaviors # supermod.cityBlock.subclass = cityBlock # end class cityBlock
[docs]class chebychev(supermod.chebychev): def __init__(self, Extension=None): super(chebychev, self).__init__(Extension, )
# # XMLBehaviors # supermod.chebychev.subclass = chebychev # end class chebychev
[docs]class minkowski(supermod.minkowski): def __init__(self, p_parameter=None, Extension=None): super(minkowski, self).__init__(p_parameter, Extension, )
# # XMLBehaviors # supermod.minkowski.subclass = minkowski # end class minkowski
[docs]class simpleMatching(supermod.simpleMatching): def __init__(self, Extension=None): super(simpleMatching, self).__init__(Extension, )
# # XMLBehaviors # supermod.simpleMatching.subclass = simpleMatching # end class simpleMatching
[docs]class jaccard(supermod.jaccard): def __init__(self, Extension=None): super(jaccard, self).__init__(Extension, )
# # XMLBehaviors # supermod.jaccard.subclass = jaccard # end class jaccard
[docs]class tanimoto(supermod.tanimoto): def __init__(self, Extension=None): super(tanimoto, self).__init__(Extension, )
# # XMLBehaviors # supermod.tanimoto.subclass = tanimoto # end class tanimoto
[docs]class binarySimilarity(supermod.binarySimilarity): def __init__(self, c00_parameter=None, c01_parameter=None, c10_parameter=None, c11_parameter=None, d00_parameter=None, d01_parameter=None, d10_parameter=None, d11_parameter=None, Extension=None): super(binarySimilarity, self).__init__(c00_parameter, c01_parameter, c10_parameter, c11_parameter, d00_parameter, d01_parameter, d10_parameter, d11_parameter, Extension, )
# # XMLBehaviors # supermod.binarySimilarity.subclass = binarySimilarity # end class binarySimilarity
[docs]class DataDictionary(supermod.DataDictionary): """ The DataDictionary contains definitions for fields as used in mining models. It specifies the types and value ranges. These definitions are assumed to be independent of specific data sets as used for training or scoring a specific model Parameters ---------- numberOfFields : The value numberOfFields is the number of fields which are defined in the content of DataDictionary, this number can be added for consistency checks DataField: The name of a DataField must be unique from other names in the DataDictionary and, with few exceptions, unique from the names of other fields in the PMML document Taxonomy: The optional attribute taxonomy refers to a taxonomy of values. The value is a name of a taxonomy. It describes a hierarchy of values. The attribute is only applicable to categorical fields """ def __init__(self, numberOfFields=None, Extension=None, DataField=None, Taxonomy=None): super(DataDictionary, self).__init__(numberOfFields, Extension, DataField, Taxonomy, ) # # XMLBehaviors #
[docs] def set_DataField(self, DataField, *args): self.DataField = DataField self.numberOfFields = len(self.DataField)
[docs] def set_DataField_wrapper(self, DataField, *args): result = self.set_DataField(DataField, *args) return result
[docs] def add_DataField(self, value, *args): self.DataField.append(value) self.numberOfFields = len(self.DataField)
[docs] def add_DataField_wrapper(self, value, *args): result = self.add_DataField(value, *args) return result
[docs] def insert_DataField_at(self, index, value, *args): self.DataField.insert(index, value) self.numberOfFields = len(self.DataField)
[docs] def insert_DataField_at_wrapper(self, index, value, *args): result = self.insert_DataField_at(index, value, *args) return result
supermod.DataDictionary.subclass = DataDictionary # end class DataDictionary
[docs]class DataField(supermod.DataField): """ DataField contains features name and target name Parameters ---------- name: The name of a DataField must be unique from other names in the DataDictionary and, with few exceptions, unique from the names of other fields in the PMML document displayName: DisplayName can be used when the application calls the PMML consumer. Once the consumer has received the parameters and matched to the MiningFields, the displayName is not relevant anymore. Only name is significant for internal processing, optype: The fields are separated into different types depending on which operations are defined on the values; this is defined by the attribute optype dataType: This field contain data type of the feature and target name in the DataDictionary taxonomy: The optional attribute taxonomy refers to a taxonomy of values. The value is a name of a taxonomy. It describes a hierarchy of values. The attribute is only applicable to categorical fields Value: Value is used to define the value ranges for fields in the DataDictionary """ def __init__(self, name=None, displayName=None, optype=None, dataType=None, mimeType=None, taxonomy=None, isCyclic='0', Extension=None, Interval=None, Value=None): super(DataField, self).__init__(name, displayName, optype, dataType, mimeType, taxonomy, isCyclic, Extension, Interval, Value, )
# # XMLBehaviors # supermod.DataField.subclass = DataField # end class DataField
[docs]class Value(supermod.Value): def __init__(self, value=None, displayValue=None, property='valid', Extension=None): super(Value, self).__init__(value, displayValue, property, Extension, )
# # XMLBehaviors # supermod.Value.subclass = Value # end class Value
[docs]class Interval(supermod.Interval): def __init__(self, closure=None, leftMargin=None, rightMargin=None, Extension=None): super(Interval, self).__init__(closure, leftMargin, rightMargin, Extension, )
# # XMLBehaviors # supermod.Interval.subclass = Interval # end class Interval
[docs]class DefineFunction(supermod.DefineFunction): def __init__(self, name=None, optype=None, dataType=None, Extension=None, ParameterField=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(DefineFunction, self).__init__(name, optype, dataType, Extension, ParameterField, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, )
# # XMLBehaviors # supermod.DefineFunction.subclass = DefineFunction # end class DefineFunction
[docs]class ParameterField(supermod.ParameterField): def __init__(self, name=None, optype=None, dataType=None): super(ParameterField, self).__init__(name, optype, dataType, )
# # XMLBehaviors # supermod.ParameterField.subclass = ParameterField # end class ParameterField
[docs]class Apply(supermod.Apply): """ Apply is an element used in DerivedField Parameters ---------- function: derive a value by applying a function to one or more parameters mapMissingTo: used to map a missing result to the value specified by the attribute FieldRef: Field references are simply pass-throughs to fields previously defined in the DataDictionary, a DerivedField, or a result field Constant: used in expressions which have multiple arguments. The actual value of a constant is given by the content of the element NormContinuous: used to implement simple normalization functions such as the z-score transformation" (X - m ) / s, where m is the mean value and s is the standard deviation NormDiscrete: refer to a certain input field define a fan-out function which maps a single input field to a set of normalized fields Discretize: Takes the input field as input and maps values less than 0 to negative and other values to positive MapValues: element can be used to create missing value indicators for categorical variables TextIndex: TextIndex expression to extract frequency information from the text input field, for a given term. The TextIndex element fully configures how the text input should be indexed, including case sensitivity, normalization and other settings Aggregate: summarize or collect groups of values, e.g., compute average Lag: defined as the value of the given input field a fixed number of records prior to the current one,If the desired value is not present, for a given record, the lag will be set to missing """ def __init__(self, function=None, mapMissingTo=None, defaultValue=None, invalidValueTreatment='returnInvalid', Extension=None, Apply_member=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(Apply, self).__init__(function, mapMissingTo, defaultValue, invalidValueTreatment, Extension, Apply_member, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, )
# # XMLBehaviors # supermod.Apply.subclass = Apply # end class Apply
[docs]class DeepNetwork(supermod.DeepNetwork): def __init__(self, modelName=None, functionName=None, algorithmName=None, normalizationMethod='none', numberOfLayers=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, TrainingParameters=None, NetworkLayer=None, NeuralOutputs=None, ModelVerification=None, Extension=None): super(DeepNetwork, self).__init__(modelName, functionName, algorithmName, normalizationMethod, numberOfLayers, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, TrainingParameters, NetworkLayer, NeuralOutputs, ModelVerification, Extension, ) # # XMLBehaviors #
[docs] def set_NetworkLayer(self, NetworkLayer, *args): self.NetworkLayer = NetworkLayer self.numberOfLayers = len(self.NetworkLayer)
[docs] def set_NetworkLayer_wrapper(self, NetworkLayer, *args): result = self.set_NetworkLayer(NetworkLayer, *args) return result
[docs] def add_NetworkLayer(self, value, *args): self.NetworkLayer.append(value) self.numberOfLayers = len(self.NetworkLayer)
[docs] def add_NetworkLayer_wrapper(self, value, *args): result = self.add_NetworkLayer(value, *args) return result
[docs] def insert_NetworkLayer_at(self, index, value, *args): self.NetworkLayer.insert(index, value) self.numberOfLayers = len(self.NetworkLayer)
[docs] def insert_NetworkLayer_at_wrapper(self, index, value, *args): result = self.insert_NetworkLayer_at(index, value, *args) return result
supermod.DeepNetwork.subclass = DeepNetwork # end class DeepNetwork
[docs]class NetworkLayer(supermod.NetworkLayer): def __init__(self, normalizationMethod='none', layerType=None, layerId=None, connectionLayerId=None, inputFieldName=None, Extension=None, LayerParameters=None, LayerWeights=None, LayerBias=None): super(NetworkLayer, self).__init__(normalizationMethod, layerType, layerId, connectionLayerId, inputFieldName, Extension, LayerParameters, LayerWeights, LayerBias, )
# # XMLBehaviors # supermod.NetworkLayer.subclass = NetworkLayer # end class NetworkLayer
[docs]class TrainingParameters(supermod.TrainingParameters): def __init__(self, architectureName=None, dataset=None, framework=None, Extension=None, Losses=None, Metrics=None, Optimizers=None): super(TrainingParameters, self).__init__(architectureName, dataset, framework, Extension, Losses, Metrics, Optimizers, )
# # XMLBehaviors # supermod.TrainingParameters.subclass = TrainingParameters # end class TrainingParameters
[docs]class Metrics(supermod.Metrics): def __init__(self, top_k_categories_for_accuracy=None, metric=None, Extension=None): super(Metrics, self).__init__(top_k_categories_for_accuracy, metric, Extension, )
# # XMLBehaviors # supermod.Metrics.subclass = Metrics # end class Metrics
[docs]class Optimizers(supermod.Optimizers): def __init__(self, clipnorm=None, clipvalue=None, Extension=None, SGD=None, RMSprop=None, Adagrad=None, Adadelta=None, Adam=None, Adamax=None, Nadam=None): super(Optimizers, self).__init__(clipnorm, clipvalue, Extension, SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam, )
# # XMLBehaviors # supermod.Optimizers.subclass = Optimizers # end class Optimizers
[docs]class Losses(supermod.Losses): def __init__(self, loss=None, Extension=None): super(Losses, self).__init__(loss, Extension, )
# # XMLBehaviors # supermod.Losses.subclass = Losses # end class Losses
[docs]class SGD(supermod.SGD): def __init__(self, learningRate=None, momentum=None, decayRate=None, nesterov=None, Extension=None): super(SGD, self).__init__(learningRate, momentum, decayRate, nesterov, Extension, )
# # XMLBehaviors # supermod.SGD.subclass = SGD # end class SGD
[docs]class RMSprop(supermod.RMSprop): def __init__(self, learningRate=None, rho=None, decayRate=None, epsilon=None, Extension=None): super(RMSprop, self).__init__(learningRate, rho, decayRate, epsilon, Extension, )
# # XMLBehaviors # supermod.RMSprop.subclass = RMSprop # end class RMSprop
[docs]class Adagrad(supermod.Adagrad): def __init__(self, learningRate=None, decayRate=None, epsilon=None, Extension=None): super(Adagrad, self).__init__(learningRate, decayRate, epsilon, Extension, )
# # XMLBehaviors # supermod.Adagrad.subclass = Adagrad # end class Adagrad
[docs]class Adadelta(supermod.Adadelta): def __init__(self, learningRate=None, rho=None, decayRate=None, epsilon=None, Extension=None): super(Adadelta, self).__init__(learningRate, rho, decayRate, epsilon, Extension, )
# # XMLBehaviors # supermod.Adadelta.subclass = Adadelta # end class Adadelta
[docs]class Adam(supermod.Adam): def __init__(self, learningRate=None, beta_1=None, beta_2=None, decayRate=None, epsilon=None, Extension=None): super(Adam, self).__init__(learningRate, beta_1, beta_2, decayRate, epsilon, Extension, )
# # XMLBehaviors # supermod.Adam.subclass = Adam # end class Adam
[docs]class Adamax(supermod.Adamax): def __init__(self, learningRate=None, beta_1=None, beta_2=None, decayRate=None, epsilon=None, Extension=None): super(Adamax, self).__init__(learningRate, beta_1, beta_2, decayRate, epsilon, Extension, )
# # XMLBehaviors # supermod.Adamax.subclass = Adamax # end class Adamax
[docs]class Nadam(supermod.Nadam): def __init__(self, learningRate=None, beta_1=None, beta_2=None, schedule_decay=None, epsilon=None, Extension=None): super(Nadam, self).__init__(learningRate, beta_1, beta_2, schedule_decay, epsilon, Extension, )
# # XMLBehaviors # supermod.Nadam.subclass = Nadam # end class Nadam
[docs]class LayerWeights(supermod.LayerWeights): def __init__(self, weightsShape=None, weightsFlattenAxis=None, Extension=None, valueOf_=None, mixedclass_=None, content_=None): super(LayerWeights, self).__init__(weightsShape, weightsFlattenAxis, Extension, valueOf_, mixedclass_, content_, ) # # XMLBehaviors #
[docs] def export(self, outfile, level, namespace_='', name_='LayerWeights', namespacedef_='', pretty_print=True, *args): imported_ns_def_ = supermod.GenerateDSNamespaceDefs_.get('LayerWeights') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ supermod.showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='LayerWeights') if self.hasContent_(): outfile.write('>%s' % (eol_, )) if not pretty_print: self.content_[0].value = self.content_[0].value.replace('\t', '').replace(' ', '') self.valueOf_ = self.valueOf_.replace('\t', '').replace(' ', '') self.exportChildren(outfile, level + 1, namespace_='', name_='LayerWeights', pretty_print=pretty_print) outfile.write(eol_) supermod.showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, ))
[docs] def export_wrapper(self, outfile, level, namespace_='', name_='LayerWeights', namespacedef_='', pretty_print=True, *args): result = self.export(outfile, level, namespace_='', name_='LayerWeights', namespacedef_='', pretty_print=True, *args) return result
def __init__(self, src=None, embedded=False, Extension=None, valueOf_=None, mixedclass_=None, content_=None, *args): self.original_tagname_ = None self.src = supermod._cast(None, src) if Extension is None: self.Extension = [] else: self.Extension = supermod.Extension self.valueOf_ = valueOf_ if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ def __init___wrapper(self, src=None, embedded=False, Extension=None, valueOf_=None, mixedclass_=None, content_=None, *args): result = self.__init__(src=None, embedded=False, Extension=None, valueOf_=None, mixedclass_=None, content_=None, *args) return result
[docs] def weights(self, *args): import nyoka if self.src is not None: raw_content = open(self.src, "r").read() elif self.content_ is not None and self.content_[0].value is not None: raw_content = self.content_[0].value raw_content = raw_content.replace(' ', '') raw_content = raw_content.replace('\t', '') raw_content = raw_content.replace('\n', '') if raw_content.startswith("data:float32;base64,") or raw_content.startswith("data:float64;base64,") or raw_content.startswith("data:float16;base64,"): raw_content = raw_content[20:] + "==" elif raw_content.startswith("data:float;base64,"): raw_content = raw_content[18:] + "==" else: return None from Base64 import FloatBase64 if raw_content.find("+") > 0: return FloatBase64.to_floatArray_urlsafe(raw_content) else: return FloatBase64.to_floatArray(raw_content)
[docs] def weights_wrapper(self, *args): result = self.weights(*args) return result
supermod.LayerWeights.subclass = LayerWeights # end class LayerWeights
[docs]class LayerBias(supermod.LayerBias): def __init__(self, biasShape=None, biasFlattenAxis=None, Extension=None, valueOf_=None, mixedclass_=None, content_=None): super(LayerBias, self).__init__(biasShape, biasFlattenAxis, Extension, valueOf_, mixedclass_, content_, ) # # XMLBehaviors #
[docs] def export(self, outfile, level, namespace_='', name_='LayerBias', namespacedef_='', pretty_print=True, *args): imported_ns_def_ = supermod.GenerateDSNamespaceDefs_.get('LayerBias') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ supermod.showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='LayerBias') if self.hasContent_(): outfile.write('>%s' % (eol_, )) if not pretty_print: self.content_[0].value = self.content_[0].value.replace('\t', '').replace(' ', '') self.valueOf_ = self.valueOf_.replace('\t', '').replace(' ', '') self.exportChildren(outfile, level + 1, namespace_='', name_='LayerBias', pretty_print=pretty_print) outfile.write(eol_) supermod.showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, ))
[docs] def export_wrapper(self, outfile, level, namespace_='', name_='LayerBias', namespacedef_='', pretty_print=True, *args): result = self.export(outfile, level, namespace_='', name_='LayerBias', namespacedef_='', pretty_print=True, *args) return result
[docs] def weights(self, *args): import nyoka if self.src is not None: raw_content = open(self.src, "r").read() elif self.content_ is not None and self.content_[0].value is not None: raw_content = self.content_[0].value raw_content = raw_content.replace(' ', '') raw_content = raw_content.replace('\t', '') raw_content = raw_content.replace('\n', '') if raw_content.startswith("data:float32;base64,") or raw_content.startswith("data:float64;base64,") or raw_content.startswith("data:float16;base64,"): raw_content = raw_content[20:] + "==" elif raw_content.startswith("data:float;base64,"): raw_content = raw_content[18:] + "==" else: return None from Base64 import FloatBase64 if raw_content.find("+") > 0: return FloatBase64.to_floatArray_urlsafe(raw_content) else: return FloatBase64.to_floatArray(raw_content)
[docs] def weights_wrapper(self, *args): result = self.weights(*args) return result
def __init__(self, src=None, embedded=False, Extension=None, valueOf_=None, mixedclass_=None, content_=None, *args): self.original_tagname_ = None self.src = supermod._cast(None, src) if Extension is None: self.Extension = [] else: self.Extension = supermod.Extension self.valueOf_ = valueOf_ if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ def __init___wrapper(self, src=None, embedded=False, Extension=None, valueOf_=None, mixedclass_=None, content_=None, *args): result = self.__init__(src=None, embedded=False, Extension=None, valueOf_=None, mixedclass_=None, content_=None, *args) return result
supermod.LayerBias.subclass = LayerBias # end class LayerBias
[docs]class LayerParameters(supermod.LayerParameters): def __init__(self, activationFunction=None, inputDimension=None, outputDimension=None, featureMaps=None, kernel=None, paddingType=None, stride=None, dilationRate=None, poolSize=None, depthMultiplier=None, paddingDims=None, croppingDims=None, upsamplingSize=None, return_sequences=None, return_state=None, stateful=None, inputLength=None, recurrentUnits=None, recurrentActivation=None, recurrentDropout=None, go_backwards=None, batchNormalizationEpsilon=None, flattenAxis=None, batchNormalizationAxis=None, batchNormalizationMomentum=None, batchNormalizationCenter=None, batchNormalizationScale=None, gaussianNoiseStdev=None, gaussianDropoutRate=None, alphaDropoutRate=None, alphaDropoutSeed=None, betaInitializer=None, gammaInitializer=None, movingMeanInitializer=None, movingVarianceInitializer=None, recurrentInitializer=None, betaRegularizer=None, gammaRegularizer=None, betaConstraint=None, gammaConstraint=None, kernelInitializer=None, biasInitializer=None, kernelRegularizer=None, biasRegularizer=None, kernelConstraint=None, biasConstraint=None, depthwiseConstraint=None, pointwiseConstraint=None, recurrentConstraint=None, batchSize=None, dropoutRate=None, dropoutNoiseShape=None, dropoutSeed=None, generalLUAlpha=None, reshapeTarget=None, permuteDims=None, repeatVectorTimes=None, activityRegularizerL1=None, activityRegularizerL2=None, maskValue=None, mergeLayerOp=None, mergeLayerDotOperationAxis=None, mergeLayerDotNormalize=None, mergeLayerConcatOperationAxes=None, slicingAxis=None, Extension=None): super(LayerParameters, self).__init__(activationFunction, inputDimension, outputDimension, featureMaps, kernel, paddingType, stride, dilationRate, poolSize, depthMultiplier, paddingDims, croppingDims, upsamplingSize, return_sequences, return_state, stateful, inputLength, recurrentUnits, recurrentActivation, recurrentDropout, go_backwards, batchNormalizationEpsilon, flattenAxis, batchNormalizationAxis, batchNormalizationMomentum, batchNormalizationCenter, batchNormalizationScale, gaussianNoiseStdev, gaussianDropoutRate, alphaDropoutRate, alphaDropoutSeed, betaInitializer, gammaInitializer, movingMeanInitializer, movingVarianceInitializer, recurrentInitializer, betaRegularizer, gammaRegularizer, betaConstraint, gammaConstraint, kernelInitializer, biasInitializer, kernelRegularizer, biasRegularizer, kernelConstraint, biasConstraint, depthwiseConstraint, pointwiseConstraint, recurrentConstraint, batchSize, dropoutRate, dropoutNoiseShape, dropoutSeed, generalLUAlpha, reshapeTarget, permuteDims, repeatVectorTimes, activityRegularizerL1, activityRegularizerL2, maskValue, mergeLayerOp, mergeLayerDotOperationAxis, mergeLayerDotNormalize, mergeLayerConcatOperationAxes, slicingAxis, Extension, )
# # XMLBehaviors # supermod.LayerParameters.subclass = LayerParameters # end class LayerParameters
[docs]class GaussianProcessModel(supermod.GaussianProcessModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, optimizer=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, RadialBasisKernel=None, ARDSquaredExponentialKernel=None, AbsoluteExponentialKernel=None, GeneralizedExponentialKernel=None, TrainingInstances=None, ModelVerification=None, Extension=None): super(GaussianProcessModel, self).__init__(modelName, functionName, algorithmName, optimizer, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, RadialBasisKernel, ARDSquaredExponentialKernel, AbsoluteExponentialKernel, GeneralizedExponentialKernel, TrainingInstances, ModelVerification, Extension, )
# # XMLBehaviors # supermod.GaussianProcessModel.subclass = GaussianProcessModel # end class GaussianProcessModel
[docs]class RadialBasisKernel(supermod.RadialBasisKernel): def __init__(self, description=None, gamma='1', noiseVariance='1', lambda_='1', Extension=None): super(RadialBasisKernel, self).__init__(description, gamma, noiseVariance, lambda_, Extension, )
# # XMLBehaviors # supermod.RadialBasisKernel.subclass = RadialBasisKernel # end class RadialBasisKernel
[docs]class ARDSquaredExponentialKernel(supermod.ARDSquaredExponentialKernel): def __init__(self, description=None, gamma='1', noiseVariance='1', Extension=None, Lambda=None): super(ARDSquaredExponentialKernel, self).__init__(description, gamma, noiseVariance, Extension, Lambda, )
# # XMLBehaviors # supermod.ARDSquaredExponentialKernel.subclass = ARDSquaredExponentialKernel # end class ARDSquaredExponentialKernel
[docs]class AbsoluteExponentialKernel(supermod.AbsoluteExponentialKernel): def __init__(self, description=None, gamma='1', noiseVariance='1', Extension=None, Lambda=None): super(AbsoluteExponentialKernel, self).__init__(description, gamma, noiseVariance, Extension, Lambda, )
# # XMLBehaviors # supermod.AbsoluteExponentialKernel.subclass = AbsoluteExponentialKernel # end class AbsoluteExponentialKernel
[docs]class GeneralizedExponentialKernel(supermod.GeneralizedExponentialKernel): def __init__(self, description=None, gamma='1', noiseVariance='1', degree='1', Extension=None, Lambda=None): super(GeneralizedExponentialKernel, self).__init__(description, gamma, noiseVariance, degree, Extension, Lambda, )
# # XMLBehaviors # supermod.GeneralizedExponentialKernel.subclass = GeneralizedExponentialKernel # end class GeneralizedExponentialKernel
[docs]class Lambda(supermod.Lambda): def __init__(self, Extension=None, Array=None): super(Lambda, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.Lambda.subclass = Lambda # end class Lambda
[docs]class GeneralRegressionModel(supermod.GeneralRegressionModel): def __init__(self, targetVariableName=None, modelType=None, modelName=None, functionName=None, algorithmName=None, targetReferenceCategory=None, cumulativeLink=None, linkFunction=None, linkParameter=None, trialsVariable=None, trialsValue=None, distribution=None, distParameter=None, offsetVariable=None, offsetValue=None, modelDF=None, endTimeVariable=None, startTimeVariable=None, subjectIDVariable=None, statusVariable=None, baselineStrataVariable=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, ParameterList=None, FactorList=None, CovariateList=None, PPMatrix=None, PCovMatrix=None, ParamMatrix=None, EventValues=None, BaseCumHazardTables=None, ModelVerification=None, Extension=None): super(GeneralRegressionModel, self).__init__(targetVariableName, modelType, modelName, functionName, algorithmName, targetReferenceCategory, cumulativeLink, linkFunction, linkParameter, trialsVariable, trialsValue, distribution, distParameter, offsetVariable, offsetValue, modelDF, endTimeVariable, startTimeVariable, subjectIDVariable, statusVariable, baselineStrataVariable, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, ParameterList, FactorList, CovariateList, PPMatrix, PCovMatrix, ParamMatrix, EventValues, BaseCumHazardTables, ModelVerification, Extension, )
# # XMLBehaviors # supermod.GeneralRegressionModel.subclass = GeneralRegressionModel # end class GeneralRegressionModel
[docs]class ParameterList(supermod.ParameterList): def __init__(self, Extension=None, Parameter=None): super(ParameterList, self).__init__(Extension, Parameter, )
# # XMLBehaviors # supermod.ParameterList.subclass = ParameterList # end class ParameterList
[docs]class Parameter(supermod.Parameter): def __init__(self, name=None, label=None, referencePoint='0', Extension=None): super(Parameter, self).__init__(name, label, referencePoint, Extension, )
# # XMLBehaviors # supermod.Parameter.subclass = Parameter # end class Parameter
[docs]class FactorList(supermod.FactorList): def __init__(self, Extension=None, Predictor=None): super(FactorList, self).__init__(Extension, Predictor, )
# # XMLBehaviors # supermod.FactorList.subclass = FactorList # end class FactorList
[docs]class CovariateList(supermod.CovariateList): def __init__(self, Extension=None, Predictor=None): super(CovariateList, self).__init__(Extension, Predictor, )
# # XMLBehaviors # supermod.CovariateList.subclass = CovariateList # end class CovariateList
[docs]class Predictor(supermod.Predictor): def __init__(self, name=None, contrastMatrixType=None, Extension=None, Categories=None, Matrix=None): super(Predictor, self).__init__(name, contrastMatrixType, Extension, Categories, Matrix, )
# # XMLBehaviors # supermod.Predictor.subclass = Predictor # end class Predictor
[docs]class Categories(supermod.Categories): def __init__(self, Extension=None, Category=None): super(Categories, self).__init__(Extension, Category, )
# # XMLBehaviors # supermod.Categories.subclass = Categories # end class Categories
[docs]class Category(supermod.Category): def __init__(self, value=None, Extension=None): super(Category, self).__init__(value, Extension, )
# # XMLBehaviors # supermod.Category.subclass = Category # end class Category
[docs]class PPMatrix(supermod.PPMatrix): def __init__(self, Extension=None, PPCell=None): super(PPMatrix, self).__init__(Extension, PPCell, )
# # XMLBehaviors # supermod.PPMatrix.subclass = PPMatrix # end class PPMatrix
[docs]class PPCell(supermod.PPCell): def __init__(self, value=None, predictorName=None, parameterName=None, targetCategory=None, Extension=None): super(PPCell, self).__init__(value, predictorName, parameterName, targetCategory, Extension, )
# # XMLBehaviors # supermod.PPCell.subclass = PPCell # end class PPCell
[docs]class PCovMatrix(supermod.PCovMatrix): def __init__(self, type_=None, Extension=None, PCovCell=None): super(PCovMatrix, self).__init__(type_, Extension, PCovCell, )
# # XMLBehaviors # supermod.PCovMatrix.subclass = PCovMatrix # end class PCovMatrix
[docs]class PCovCell(supermod.PCovCell): def __init__(self, pRow=None, pCol=None, tRow=None, tCol=None, value=None, targetCategory=None, Extension=None): super(PCovCell, self).__init__(pRow, pCol, tRow, tCol, value, targetCategory, Extension, )
# # XMLBehaviors # supermod.PCovCell.subclass = PCovCell # end class PCovCell
[docs]class ParamMatrix(supermod.ParamMatrix): def __init__(self, Extension=None, PCell=None): super(ParamMatrix, self).__init__(Extension, PCell, )
# # XMLBehaviors # supermod.ParamMatrix.subclass = ParamMatrix # end class ParamMatrix
[docs]class PCell(supermod.PCell): def __init__(self, targetCategory=None, parameterName=None, beta=None, df=None, Extension=None): super(PCell, self).__init__(targetCategory, parameterName, beta, df, Extension, )
# # XMLBehaviors # supermod.PCell.subclass = PCell # end class PCell
[docs]class BaseCumHazardTables(supermod.BaseCumHazardTables): def __init__(self, maxTime=None, Extension=None, BaselineStratum=None, BaselineCell=None): super(BaseCumHazardTables, self).__init__(maxTime, Extension, BaselineStratum, BaselineCell, )
# # XMLBehaviors # supermod.BaseCumHazardTables.subclass = BaseCumHazardTables # end class BaseCumHazardTables
[docs]class BaselineStratum(supermod.BaselineStratum): def __init__(self, value=None, label=None, maxTime=None, Extension=None, BaselineCell=None): super(BaselineStratum, self).__init__(value, label, maxTime, Extension, BaselineCell, )
# # XMLBehaviors # supermod.BaselineStratum.subclass = BaselineStratum # end class BaselineStratum
[docs]class BaselineCell(supermod.BaselineCell): def __init__(self, time=None, cumHazard=None, Extension=None): super(BaselineCell, self).__init__(time, cumHazard, Extension, )
# # XMLBehaviors # supermod.BaselineCell.subclass = BaselineCell # end class BaselineCell
[docs]class EventValues(supermod.EventValues): def __init__(self, Extension=None, Value=None, Interval=None): super(EventValues, self).__init__(Extension, Value, Interval, )
# # XMLBehaviors # supermod.EventValues.subclass = EventValues # end class EventValues
[docs]class PMML(supermod.PMML): """this is the root of the pmml document""" def __init__(self, version=None, Header=None, script=None, MiningBuildTask=None, DataDictionary=None, TransformationDictionary=None, AssociationModel=None, BayesianNetworkModel=None, BaselineModel=None, ClusteringModel=None, DeepNetwork=None, GaussianProcessModel=None, GeneralRegressionModel=None, MiningModel=None, NaiveBayesModel=None, NearestNeighborModel=None, NeuralNetwork=None, RegressionModel=None, RuleSetModel=None, SequenceModel=None, Scorecard=None, SupportVectorMachineModel=None, TextModel=None, TimeSeriesModel=None, TreeModel=None, Extension=None): super(PMML, self).__init__(version, Header, script, MiningBuildTask, DataDictionary, TransformationDictionary, AssociationModel, BayesianNetworkModel, BaselineModel, ClusteringModel, DeepNetwork, GaussianProcessModel, GeneralRegressionModel, MiningModel, NaiveBayesModel, NearestNeighborModel, NeuralNetwork, RegressionModel, RuleSetModel, SequenceModel, Scorecard, SupportVectorMachineModel, TextModel, TimeSeriesModel, TreeModel, Extension, ) # # XMLBehaviors #
[docs] def export(self, outfile, level, namespace_='', name_='PMML', namespacedef_='', pretty_print=True, *args): imported_ns_def_ = supermod.GenerateDSNamespaceDefs_.get('Timestamp') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ supermod.showIndent(outfile, level, pretty_print) outfile.write('<?xml version="1.0" encoding="UTF-8"?>' + eol_) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() outfile.write(' xmlns="http://www.dmg.org/PMML-4_3"') self.exportAttributes(outfile, level, already_processed, namespace_, name_='Timestamp') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='Timestamp', pretty_print=pretty_print) supermod.showIndent(outfile, 0, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, ))
[docs] def export_wrapper(self, outfile, level, namespace_='', name_='PMML', namespacedef_='', pretty_print=True, *args): result = self.export(outfile, level, namespace_='', name_='PMML', namespacedef_='', pretty_print=True, *args) return result
supermod.PMML.subclass = PMML # end class PMML
[docs]class MiningBuildTask(supermod.MiningBuildTask): def __init__(self, Extension=None): super(MiningBuildTask, self).__init__(Extension, )
# # XMLBehaviors # supermod.MiningBuildTask.subclass = MiningBuildTask # end class MiningBuildTask
[docs]class Extension(supermod.Extension): def __init__(self, extender=None, name=None, value=None, anytypeobjs_=None): super(Extension, self).__init__(extender, name, value, anytypeobjs_, )
# # XMLBehaviors # supermod.Extension.subclass = Extension # end class Extension
[docs]class ArrayType(supermod.ArrayType): def __init__(self, n=None, type_=None, Extension=None, valueOf_=None, mixedclass_=None, content_=None): super(ArrayType, self).__init__(n, type_, Extension, valueOf_, mixedclass_, content_, ) # # XMLBehaviors #
[docs] def export(self, outfile, level, namespace_='', name_='ArrayType', namespacedef_='', pretty_print=True, *args): imported_ns_def_ = supermod.GenerateDSNamespaceDefs_.get('ArrayType') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ supermod.showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='ArrayType') if self.hasContent_(): outfile.write('>%s' % (eol_, )) if not pretty_print: self.content_[0].value = self.content_[0].value.replace('\t', '').replace(' ', '') self.valueOf_ = self.valueOf_.replace('\t', '').replace(' ', '') self.exportChildren(outfile, level + 1, namespace_='', name_='ArrayType', pretty_print=pretty_print) outfile.write(eol_) supermod.showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, ))
[docs] def export_wrapper(self, outfile, level, namespace_='', name_='ArrayType', namespacedef_='', pretty_print=True, *args): result = self.export(outfile, level, namespace_='', name_='ArrayType', namespacedef_='', pretty_print=True, *args) return result
supermod.ArrayType.subclass = ArrayType # end class ArrayType
[docs]class INT_SparseArray(supermod.INT_SparseArray): def __init__(self, n=None, defaultValue='0', Indices=None, INT_Entries=None): super(INT_SparseArray, self).__init__(n, defaultValue, Indices, INT_Entries, )
# # XMLBehaviors # supermod.INT_SparseArray.subclass = INT_SparseArray # end class INT_SparseArray
[docs]class REAL_SparseArray(supermod.REAL_SparseArray): def __init__(self, n=None, defaultValue='0', Indices=None, REAL_Entries=None): super(REAL_SparseArray, self).__init__(n, defaultValue, Indices, REAL_Entries, )
# # XMLBehaviors # supermod.REAL_SparseArray.subclass = REAL_SparseArray # end class REAL_SparseArray
[docs]class Matrix(supermod.Matrix): def __init__(self, kind='any', nbRows=None, nbCols=None, diagDefault=None, offDiagDefault=None, Array=None, MatCell=None): super(Matrix, self).__init__(kind, nbRows, nbCols, diagDefault, offDiagDefault, Array, MatCell, )
# # XMLBehaviors # supermod.Matrix.subclass = Matrix # end class Matrix
[docs]class MatCell(supermod.MatCell): def __init__(self, row=None, col=None, valueOf_=None): super(MatCell, self).__init__(row, col, valueOf_, )
# # XMLBehaviors # supermod.MatCell.subclass = MatCell # end class MatCell supermod.Header.subclass = Header # end class Header
[docs]class script(supermod.script): def __init__(self, for_=None, class_=None, Extension=None, valueOf_=None, mixedclass_=None, content_=None): super(script, self).__init__(for_, class_, Extension, valueOf_, mixedclass_, content_, ) # # XMLBehaviors #
[docs] def export(self, outfile, level, namespace_='', name_='script', namespacedef_='', pretty_print=True, *args): imported_ns_def_ = supermod.GenerateDSNamespaceDefs_.get('script') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='script') if self.hasContent_(): outfile.write('>%s' % (eol_, )) if pretty_print: lines = [] code = self.valueOf_.lstrip('\n') leading_spaces = len(code) - len(code.lstrip(' ')) for line in code.split('\n'): lines.append(line[leading_spaces:]) code = '\n'.join(lines) indent = " " * (level + 1) count = code.count('\n') indented = indent + code.replace("\n", "\n" + indent, count - 1) self.content_ = [supermod.MixedContainer(1, 2, "", str(indented))] self.valueOf_ = str(indented) self.exportChildren(outfile, level + 1, namespace_='', name_='script', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, ))
[docs] def export_wrapper(self, outfile, level, namespace_='', name_='script', namespacedef_='', pretty_print=True, *args): result = self.export(outfile, level, namespace_='', name_='script', namespacedef_='', pretty_print=True, *args) return result
supermod.script.subclass = script # end class script
[docs]class Application(supermod.Application): def __init__(self, name=None, version=None, Extension=None): super(Application, self).__init__(name, version, Extension, )
# # XMLBehaviors # supermod.Application.subclass = Application # end class Application
[docs]class Annotation(supermod.Annotation): def __init__(self, Extension=None, valueOf_=None, mixedclass_=None, content_=None): super(Annotation, self).__init__(Extension, valueOf_, mixedclass_, content_, )
# # XMLBehaviors # supermod.Annotation.subclass = Annotation # end class Annotation
[docs]class Timestamp(supermod.Timestamp): def __init__(self, Extension=None, valueOf_=None, mixedclass_=None, content_=None): super(Timestamp, self).__init__(Extension, valueOf_, mixedclass_, content_, ) # # XMLBehaviors #
[docs] def export(self, outfile, level, namespace_='', name_='Timestamp', namespacedef_='', pretty_print=True, *args): imported_ns_def_ = supermod.GenerateDSNamespaceDefs_.get('Timestamp') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ supermod.showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='Timestamp') if self.hasContent_(): outfile.write('>%s' % ('', )) self.exportChildren(outfile, level + 1, namespace_='', name_='Timestamp', pretty_print=pretty_print) supermod.showIndent(outfile, 0, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, ))
[docs] def export_wrapper(self, outfile, level, namespace_='', name_='Timestamp', namespacedef_='', pretty_print=True, *args): result = self.export(outfile, level, namespace_='', name_='Timestamp', namespacedef_='', pretty_print=True, *args) return result
supermod.Timestamp.subclass = Timestamp # end class Timestamp
[docs]class NearestNeighborModel(supermod.NearestNeighborModel): """ The root element of an XML k-NN model. Each instance of a k-NN model must start with this element Parameters ---------- modelName : element identifies the model with a unique name in the context of the PMML file functionName : Stores what type of problems it is ex classification or regression algorithmName : Stores algorithm name used in the model numberOfNeighbors : Specifies K, the number of desired neighbors continuousScoringMethod : Specify the scoring (or combining) method based on the continuous or categorical target values of K neighbors categoricalScoringMethod : Specify the scoring (or combining) method based on the continuous or categorical target values of K neighbors instanceIdVariable Contains the instance ID variable name and so refers to the name of a field in InstanceFields threshold : Defines a very small positive number to be used for "weighted" scoring methods to avoid numerical problems when distance or similarity measure is zero isScorable : The isScorable attribute indicates whether the model is valid for scoring MiningSchema : list the fields that have to be provided in order to apply the model Output : describes a set of result values that can be returned from a model Targets : The target values are derived from a variety of elements in the models TrainingInstances : element serves as an envelope for defining all of the training instances. It contains the definition of the fields included in the training instances as well as a table for representing the training data itself ComparisonMeasure : Defines the distance or similarity measure used to find the k-nearest neighbors KNNInputs : This element serves as an envelope for defining all of the k-NN inputs """ def __init__(self, modelName=None, functionName=None, algorithmName=None, numberOfNeighbors=None, continuousScoringMethod='average', categoricalScoringMethod='majorityVote', instanceIdVariable=None, threshold='0.001', isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, TrainingInstances=None, ComparisonMeasure=None, KNNInputs=None, ModelVerification=None, Extension=None): super(NearestNeighborModel, self).__init__(modelName, functionName, algorithmName, numberOfNeighbors, continuousScoringMethod, categoricalScoringMethod, instanceIdVariable, threshold, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, TrainingInstances, ComparisonMeasure, KNNInputs, ModelVerification, Extension, )
# # XMLBehaviors # supermod.NearestNeighborModel.subclass = NearestNeighborModel # end class NearestNeighborModel
[docs]class TrainingInstances(supermod.TrainingInstances): """ This element serves as an envelope for defining all of the training instances. It contains the definition of the fields included in the training instances as well as a table for representing the training data itself Parameters ---------- isTransformed : Used as a flag to determine whether or not the training instances have already been transformed recordCount : Defines the number of training instances or records. This number needs to match the number of instances defined in the element InlineTable or in the external data if TableLocator is used fieldCount : Defines the number of fields (features + targets). This number needs to match the number of InstanceField elements defined under InstanceFields TableLocator : Allows for the training data to be stored in an external table. Such a table can then be referenced by the TableLocator element which implements a kind of URL for tables InlineTable : Allows for the training instances to be part of the PMML document itself. When used in k-NN models, a row in an InlineTable should contain a sequence of elements representing the input fields """ def __init__(self, isTransformed=False, recordCount=None, fieldCount=None, Extension=None, InstanceFields=None, TableLocator=None, InlineTable=None): super(TrainingInstances, self).__init__(isTransformed, recordCount, fieldCount, Extension, InstanceFields, TableLocator, InlineTable, )
# # XMLBehaviors # supermod.TrainingInstances.subclass = TrainingInstances # end class TrainingInstances
[docs]class InstanceFields(supermod.InstanceFields): """ The InstanceFields element serves as an envelope for all the fields included in the training instances """ def __init__(self, Extension=None, InstanceField=None): super(InstanceFields, self).__init__(Extension, InstanceField, )
# # XMLBehaviors # supermod.InstanceFields.subclass = InstanceFields # end class InstanceFields
[docs]class InstanceField(supermod.InstanceField): """ InstanceField Parameters ---------- field : Contains the name of a DataField or a DerivedField column : Defines the name of the tag or column used by element InlineTable. This attribute is required if element InlineTable is used to represent training data """ def __init__(self, field=None, column=None, Extension=None): super(InstanceField, self).__init__(field, column, Extension, )
# # XMLBehaviors # supermod.InstanceField.subclass = InstanceField # end class InstanceField
[docs]class KNNInputs(supermod.KNNInputs): """ This element serves as an envelope for defining all of the k-NN inputs Parameters ---------- KNNInput : elements which define the fields used to query the k-NN model """ def __init__(self, Extension=None, KNNInput=None): super(KNNInputs, self).__init__(Extension, KNNInput, )
# # XMLBehaviors # supermod.KNNInputs.subclass = KNNInputs # end class KNNInputs
[docs]class KNNInput(supermod.KNNInput): """ elements which define the fields used to query the k-NN model Parameters ---------- field : Contains the name of a DataField or a DerivedField fieldWeight : Defines the importance factor for the field. It is used in the comparison functions to compute the comparison measure. The value must be a number greater than 0. The default value is 1.0 compareFunction : attribute, this is either defined as default in element ComparisonMeasure or it can be defined per KNNInput """ def __init__(self, field=None, fieldWeight='1', compareFunction=None, Extension=None): super(KNNInput, self).__init__(field, fieldWeight, compareFunction, Extension, )
# # XMLBehaviors # supermod.KNNInput.subclass = KNNInput # end class KNNInput
[docs]class MiningSchema(supermod.MiningSchema): """ list the fields which a user has to provide in order to apply the model. Parameters ---------- MiningField : identify which of the DataFields defined in the DataDictionary are used in the model """ def __init__(self, Extension=None, MiningField=None): super(MiningSchema, self).__init__(Extension, MiningField, )
# # XMLBehaviors # supermod.MiningSchema.subclass = MiningSchema # end class MiningSchema
[docs]class MiningField(supermod.MiningField): """ identify which of the DataFields defined in the DataDictionary are used in the model Parameters ---------- name : string symbolic name of field, must refer to a field in the DataDictionary usageType : field used as input (independent field) optype : The attribute value overrides the corresponding value in the DataField importance : states the relative importance of the field outliers : it shows the records which are out of trend lowValue : Extreme low point to define outliers highValue : Extreme high point to define outliers missingValueReplacement: If this attribute is specified then a missing input value is automatically replaced by the given value missingValueTreatment : this attribute just indicates how the missingValueReplacement was derived invalidValueTreatment : This field specifies how invalid input values are handled """ def __init__(self, name=None, usageType='active', optype=None, importance=None, outliers='asIs', lowValue=None, highValue=None, missingValueReplacement=None, missingValueTreatment=None, invalidValueTreatment='returnInvalid', Extension=None): super(MiningField, self).__init__(name, usageType, optype, importance, outliers, lowValue, highValue, missingValueReplacement, missingValueTreatment, invalidValueTreatment, Extension, ) # # XMLBehaviors #
[docs] def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MiningField', *args): if self.name is not None and 'name' not in already_processed: already_processed.add('name') outfile.write(' name=%s' % (supermod.quote_attrib(self.name), )) if self.usageType is not None and 'usageType' not in already_processed: already_processed.add('usageType') outfile.write(' usageType=%s' % (supermod.quote_attrib(self.usageType), )) if self.optype is not None and 'optype' not in already_processed: already_processed.add('optype') outfile.write(' optype=%s' % (supermod.quote_attrib(self.optype), )) if self.importance is not None and 'importance' not in already_processed: already_processed.add('importance') outfile.write(' importance=%s' % (supermod.quote_attrib(self.importance), )) if self.outliers != "asIs" and 'outliers' not in already_processed: already_processed.add('outliers') outfile.write(' outliers=%s' % (supermod.quote_attrib(self.outliers), )) if self.lowValue is not None and 'lowValue' not in already_processed: already_processed.add('lowValue') outfile.write(' lowValue=%s' % (supermod.quote_attrib(self.lowValue), )) if self.highValue is not None and 'highValue' not in already_processed: already_processed.add('highValue') outfile.write(' highValue=%s' % (supermod.quote_attrib(self.highValue), )) if self.missingValueReplacement is not None and 'missingValueReplacement' not in already_processed: already_processed.add('missingValueReplacement') outfile.write(' missingValueReplacement=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.missingValueReplacement), input_name='missingValueReplacement')), )) if self.missingValueTreatment is not None and 'missingValueTreatment' not in already_processed: already_processed.add('missingValueTreatment') outfile.write(' missingValueTreatment=%s' % (supermod.quote_attrib(self.missingValueTreatment), )) if self.invalidValueTreatment != "returnInvalid" and 'invalidValueTreatment' not in already_processed: already_processed.add('invalidValueTreatment') outfile.write(' invalidValueTreatment=%s' % (supermod.quote_attrib(self.invalidValueTreatment), ))
[docs] def exportAttributes_wrapper(self, outfile, level, already_processed, namespace_='', name_='MiningField', *args): result = self.exportAttributes(outfile, level, already_processed, namespace_='', name_='MiningField', *args) return result
supermod.MiningField.subclass = MiningField # end class MiningField
[docs]class ModelExplanation(supermod.ModelExplanation): def __init__(self, Extension=None, PredictiveModelQuality=None, ClusteringModelQuality=None, Correlations=None): super(ModelExplanation, self).__init__(Extension, PredictiveModelQuality, ClusteringModelQuality, Correlations, )
# # XMLBehaviors # supermod.ModelExplanation.subclass = ModelExplanation # end class ModelExplanation
[docs]class PredictiveModelQuality(supermod.PredictiveModelQuality): def __init__(self, targetField=None, dataName=None, dataUsage='training', meanError=None, meanAbsoluteError=None, meanSquaredError=None, rootMeanSquaredError=None, r_squared=None, adj_r_squared=None, sumSquaredError=None, sumSquaredRegression=None, numOfRecords=None, numOfRecordsWeighted=None, numOfPredictors=None, degreesOfFreedom=None, fStatistic=None, AIC=None, BIC=None, AICc=None, Extension=None, ConfusionMatrix=None, LiftData=None, ROC=None): super(PredictiveModelQuality, self).__init__(targetField, dataName, dataUsage, meanError, meanAbsoluteError, meanSquaredError, rootMeanSquaredError, r_squared, adj_r_squared, sumSquaredError, sumSquaredRegression, numOfRecords, numOfRecordsWeighted, numOfPredictors, degreesOfFreedom, fStatistic, AIC, BIC, AICc, Extension, ConfusionMatrix, LiftData, ROC, )
# # XMLBehaviors # supermod.PredictiveModelQuality.subclass = PredictiveModelQuality # end class PredictiveModelQuality
[docs]class ClusteringModelQuality(supermod.ClusteringModelQuality): def __init__(self, dataName=None, SSE=None, SSB=None): super(ClusteringModelQuality, self).__init__(dataName, SSE, SSB, )
# # XMLBehaviors # supermod.ClusteringModelQuality.subclass = ClusteringModelQuality # end class ClusteringModelQuality
[docs]class LiftData(supermod.LiftData): def __init__(self, targetFieldValue=None, targetFieldDisplayValue=None, rankingQuality=None, Extension=None, ModelLiftGraph=None, OptimumLiftGraph=None, RandomLiftGraph=None): super(LiftData, self).__init__(targetFieldValue, targetFieldDisplayValue, rankingQuality, Extension, ModelLiftGraph, OptimumLiftGraph, RandomLiftGraph, )
# # XMLBehaviors # supermod.LiftData.subclass = LiftData # end class LiftData
[docs]class ModelLiftGraph(supermod.ModelLiftGraph): def __init__(self, Extension=None, LiftGraph=None): super(ModelLiftGraph, self).__init__(Extension, LiftGraph, )
# # XMLBehaviors # supermod.ModelLiftGraph.subclass = ModelLiftGraph # end class ModelLiftGraph
[docs]class OptimumLiftGraph(supermod.OptimumLiftGraph): def __init__(self, Extension=None, LiftGraph=None): super(OptimumLiftGraph, self).__init__(Extension, LiftGraph, )
# # XMLBehaviors # supermod.OptimumLiftGraph.subclass = OptimumLiftGraph # end class OptimumLiftGraph
[docs]class RandomLiftGraph(supermod.RandomLiftGraph): def __init__(self, Extension=None, LiftGraph=None): super(RandomLiftGraph, self).__init__(Extension, LiftGraph, )
# # XMLBehaviors # supermod.RandomLiftGraph.subclass = RandomLiftGraph # end class RandomLiftGraph
[docs]class LiftGraph(supermod.LiftGraph): def __init__(self, Extension=None, XCoordinates=None, YCoordinates=None, BoundaryValues=None, BoundaryValueMeans=None): super(LiftGraph, self).__init__(Extension, XCoordinates, YCoordinates, BoundaryValues, BoundaryValueMeans, )
# # XMLBehaviors # supermod.LiftGraph.subclass = LiftGraph # end class LiftGraph
[docs]class XCoordinates(supermod.XCoordinates): def __init__(self, Extension=None, Array=None): super(XCoordinates, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.XCoordinates.subclass = XCoordinates # end class XCoordinates
[docs]class YCoordinates(supermod.YCoordinates): def __init__(self, Extension=None, Array=None): super(YCoordinates, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.YCoordinates.subclass = YCoordinates # end class YCoordinates
[docs]class BoundaryValues(supermod.BoundaryValues): def __init__(self, Extension=None, Array=None): super(BoundaryValues, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.BoundaryValues.subclass = BoundaryValues # end class BoundaryValues
[docs]class BoundaryValueMeans(supermod.BoundaryValueMeans): def __init__(self, Extension=None, Array=None): super(BoundaryValueMeans, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.BoundaryValueMeans.subclass = BoundaryValueMeans # end class BoundaryValueMeans
[docs]class ROC(supermod.ROC): def __init__(self, positiveTargetFieldValue=None, positiveTargetFieldDisplayValue=None, negativeTargetFieldValue=None, negativeTargetFieldDisplayValue=None, Extension=None, ROCGraph=None): super(ROC, self).__init__(positiveTargetFieldValue, positiveTargetFieldDisplayValue, negativeTargetFieldValue, negativeTargetFieldDisplayValue, Extension, ROCGraph, )
# # XMLBehaviors # supermod.ROC.subclass = ROC # end class ROC
[docs]class ROCGraph(supermod.ROCGraph): def __init__(self, Extension=None, XCoordinates=None, YCoordinates=None, BoundaryValues=None): super(ROCGraph, self).__init__(Extension, XCoordinates, YCoordinates, BoundaryValues, )
# # XMLBehaviors # supermod.ROCGraph.subclass = ROCGraph # end class ROCGraph
[docs]class ConfusionMatrix(supermod.ConfusionMatrix): def __init__(self, Extension=None, ClassLabels=None, Matrix=None): super(ConfusionMatrix, self).__init__(Extension, ClassLabels, Matrix, )
# # XMLBehaviors # supermod.ConfusionMatrix.subclass = ConfusionMatrix # end class ConfusionMatrix
[docs]class ClassLabels(supermod.ClassLabels): def __init__(self, Extension=None, Array=None): super(ClassLabels, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.ClassLabels.subclass = ClassLabels # end class ClassLabels
[docs]class Correlations(supermod.Correlations): def __init__(self, Extension=None, CorrelationFields=None, CorrelationValues=None, CorrelationMethods=None): super(Correlations, self).__init__(Extension, CorrelationFields, CorrelationValues, CorrelationMethods, )
# # XMLBehaviors # supermod.Correlations.subclass = Correlations # end class Correlations
[docs]class CorrelationFields(supermod.CorrelationFields): def __init__(self, Extension=None, Array=None): super(CorrelationFields, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.CorrelationFields.subclass = CorrelationFields # end class CorrelationFields
[docs]class CorrelationValues(supermod.CorrelationValues): def __init__(self, Extension=None, Matrix=None): super(CorrelationValues, self).__init__(Extension, Matrix, )
# # XMLBehaviors # supermod.CorrelationValues.subclass = CorrelationValues # end class CorrelationValues
[docs]class CorrelationMethods(supermod.CorrelationMethods): def __init__(self, Extension=None, Matrix=None): super(CorrelationMethods, self).__init__(Extension, Matrix, )
# # XMLBehaviors # supermod.CorrelationMethods.subclass = CorrelationMethods # end class CorrelationMethods
[docs]class ModelVerification(supermod.ModelVerification): def __init__(self, recordCount=None, fieldCount=None, Extension=None, VerificationFields=None, InlineTable=None): super(ModelVerification, self).__init__(recordCount, fieldCount, Extension, VerificationFields, InlineTable, )
# # XMLBehaviors # supermod.ModelVerification.subclass = ModelVerification # end class ModelVerification
[docs]class VerificationFields(supermod.VerificationFields): def __init__(self, Extension=None, VerificationField=None): super(VerificationFields, self).__init__(Extension, VerificationField, )
# # XMLBehaviors # supermod.VerificationFields.subclass = VerificationFields # end class VerificationFields
[docs]class VerificationField(supermod.VerificationField): def __init__(self, field=None, column=None, precision=1E-6, zeroThreshold=1E-16, Extension=None): super(VerificationField, self).__init__(field, column, precision, zeroThreshold, Extension, )
# # XMLBehaviors # supermod.VerificationField.subclass = VerificationField # end class VerificationField
[docs]class MiningModel(supermod.MiningModel): """ MiningModel contains Segmentation element with a number of Segment elements as well as the attribute multipleModelMethod specifying how all the models applicable to a record should be combined. Parameters ---------- modelName : string element identifies the model with a unique name in the context of the PMML file functionName : string Stores what type of problems it is ex classification or regression algorithmName : string Stores algorithm name used in the model isScorable : indicates whether the model is valid for scoring MiningSchema : list the fields that have to be provided in order to apply the model Output : describes a set of result values that can be returned from a model Targets : The target values are derived from a variety of elements in the models LocalTransformations : Any pre-processing information goes here Regression : regression equation can be used to define an input transformation in another model which happens to be a TreeModel DecisionTree : DecisionTree contains the essential elements of a TreeModel Segmentation : Segmentation allows representation of different models for different data segments and also can be used for model ensembles and model sequences ModelVerification : ModelVerification schema provides a dataset of model inputs and known results that can be used to verify accurate results are generated, regardless of the environment """ def __init__(self, modelName=None, functionName=None, algorithmName=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, Regression=None, DecisionTree=None, Segmentation=None, ModelVerification=None, Extension=None): super(MiningModel, self).__init__(modelName, functionName, algorithmName, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, Regression, DecisionTree, Segmentation, ModelVerification, Extension, )
# # XMLBehaviors # supermod.MiningModel.subclass = MiningModel # end class MiningModel
[docs]class Segmentation(supermod.Segmentation): """ Segmentation allows representation of different models for different data segments and also can be used for model ensembles and model sequences Parameters ---------- multipleModelMethod : specifying how all the models applicable to a record should be combined Segment : Segment includes a PREDICATE element specifying the conditions under which that segment is to be used """ def __init__(self, multipleModelMethod=None, Extension=None, Segment=None): super(Segmentation, self).__init__(multipleModelMethod, Extension, Segment, )
# # XMLBehaviors # supermod.Segmentation.subclass = Segmentation # end class Segmentation
[docs]class Segment(supermod.Segment): """ Segment includes a PREDICATE element specifying the conditions under which that segment is to be used Parameters ---------- id : The value of id serves as a unique identifier for any given Node within the tree model SimplePredicate : defines a rule in the form of a simple boolean expression. The rule consists of field, operator (booleanOperator) for binary comparison, and value CompoundPredicate : an encapsulating element for combining two or more elements as defined at the entity PREDICATE SimpleSetPredicate : checks whether a field value is element of a set. The set of values is specified by the array True_ : a predicate element that identifies the boolean constant TRUE False_ : a predicate element that identifies the boolean constant False TreeModel : TreeModel in PMML allows for defining either a classification or prediction structure """ def __init__(self, id=None, weight='1', Extension=None, SimplePredicate=None, CompoundPredicate=None, SimpleSetPredicate=None, True_=None, False_=None, AssociationModel=None, BayesianNetworkModel=None, BaselineModel=None, ClusteringModel=None, DeepNetwork=None, GaussianProcessModel=None, GeneralRegressionModel=None, MiningModel=None, NaiveBayesModel=None, NearestNeighborModel=None, NeuralNetwork=None, RegressionModel=None, RuleSetModel=None, SequenceModel=None, Scorecard=None, SupportVectorMachineModel=None, TextModel=None, TimeSeriesModel=None, TreeModel=None): super(Segment, self).__init__(id, weight, Extension, SimplePredicate, CompoundPredicate, SimpleSetPredicate, True_, False_, AssociationModel, BayesianNetworkModel, BaselineModel, ClusteringModel, DeepNetwork, GaussianProcessModel, GeneralRegressionModel, MiningModel, NaiveBayesModel, NearestNeighborModel, NeuralNetwork, RegressionModel, RuleSetModel, SequenceModel, Scorecard, SupportVectorMachineModel, TextModel, TimeSeriesModel, TreeModel, )
# # XMLBehaviors # supermod.Segment.subclass = Segment # end class Segment
[docs]class ResultField(supermod.ResultField): def __init__(self, name=None, displayName=None, optype=None, dataType=None, feature=None, value=None, Extension=None): super(ResultField, self).__init__(name, displayName, optype, dataType, feature, value, Extension, )
# # XMLBehaviors # supermod.ResultField.subclass = ResultField # end class ResultField
[docs]class Regression(supermod.Regression): def __init__(self, modelName=None, functionName=None, algorithmName=None, normalizationMethod='none', Extension=None, Output=None, ModelStats=None, Targets=None, LocalTransformations=None, ResultField=None, RegressionTable=None): super(Regression, self).__init__(modelName, functionName, algorithmName, normalizationMethod, Extension, Output, ModelStats, Targets, LocalTransformations, ResultField, RegressionTable, )
# # XMLBehaviors # supermod.Regression.subclass = Regression # end class Regression
[docs]class DecisionTree(supermod.DecisionTree): def __init__(self, modelName=None, functionName=None, algorithmName=None, missingValueStrategy='none', missingValuePenalty='1.0', noTrueChildStrategy='returnNullPrediction', splitCharacteristic='multiSplit', Extension=None, Output=None, ModelStats=None, Targets=None, LocalTransformations=None, ResultField=None, Node=None): super(DecisionTree, self).__init__(modelName, functionName, algorithmName, missingValueStrategy, missingValuePenalty, noTrueChildStrategy, splitCharacteristic, Extension, Output, ModelStats, Targets, LocalTransformations, ResultField, Node, )
# # XMLBehaviors # supermod.DecisionTree.subclass = DecisionTree # end class DecisionTree
[docs]class NaiveBayesModel(supermod.NaiveBayesModel): """ Naïve Bayes uses Bayes' Theorem, combined with a ("naive") presumption of conditional independence, to predict the value of a target (output), from evidence given by one or more predictor (input) fields Parameters ---------- modelName : element identifies the model with a unique name in the context of the PMML file functionName : Stores what type of problems it is ex classification or regression algorithmName : Stores algorithm name used in the model isScorable : The isScorable attribute indicates whether the model is valid for scoring MiningSchema : list the fields that have to be provided in order to apply the model Output : describes a set of result values that can be returned from a model BayesInputs : element contains several BayesInput elements BayesOutput : BayesOutput contains the counts associated with the values of the target field """ def __init__(self, modelName=None, threshold=None, functionName=None, algorithmName=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, BayesInputs=None, BayesOutput=None, ModelVerification=None, Extension=None): super(NaiveBayesModel, self).__init__(modelName, threshold, functionName, algorithmName, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, BayesInputs, BayesOutput, ModelVerification, Extension, )
# # XMLBehaviors # supermod.NaiveBayesModel.subclass = NaiveBayesModel # end class NaiveBayesModel
[docs]class BayesInputs(supermod.BayesInputs): """ BayesInputs element contains several BayesInput elements. Parameters ---------- BayesInput: contains the counts pairing the discrete values of that field with those of the target field """ def __init__(self, Extension=None, BayesInput=None): super(BayesInputs, self).__init__(Extension, BayesInput, )
# # XMLBehaviors # supermod.BayesInputs.subclass = BayesInputs # end class BayesInputs
[docs]class BayesInput(supermod.BayesInput): """ BayesInput contains the counts pairing the discrete values of that field with those of the target field Parameters ---------- fieldName: Name of the input field TargetValueStats: TargetValueStats serves as the envelope for element TargetValueStat DerivedField: which provides a common element for the various mappings PairCounts: PairCounts lists, for a field Ii's discrete value Iij, the TargetValueCounts that pair the value Iij with each value of the target field """ def __init__(self, fieldName=None, Extension=None, TargetValueStats=None, DerivedField=None, PairCounts=None): super(BayesInput, self).__init__(fieldName, Extension, TargetValueStats, DerivedField, PairCounts, )
# # XMLBehaviors # supermod.BayesInput.subclass = BayesInput # end class BayesInput
[docs]class BayesOutput(supermod.BayesOutput): """ BayesOutput contains the counts associated with the values of the target field. Parameters ---------- fieldName: Name of the output field TargetValueCounts: TargetValueCounts lists the counts associated with each value of the target field """ def __init__(self, fieldName=None, Extension=None, TargetValueCounts=None): super(BayesOutput, self).__init__(fieldName, Extension, TargetValueCounts, )
# # XMLBehaviors # supermod.BayesOutput.subclass = BayesOutput # end class BayesOutput
[docs]class TargetValueStats(supermod.TargetValueStats): """ TargetValueStats serves as the envelope for element TargetValueStat. It is used for a continuous input field Ii to define statistical measures associated with each value of the target field. """ def __init__(self, Extension=None, TargetValueStat=None): super(TargetValueStats, self).__init__(Extension, TargetValueStat, )
# # XMLBehaviors # supermod.TargetValueStats.subclass = TargetValueStats # end class TargetValueStats
[docs]class TargetValueStat(supermod.TargetValueStat): def __init__(self, value=None, AnyDistribution=None, GaussianDistribution=None, PoissonDistribution=None, UniformDistribution=None, Extension=None): super(TargetValueStat, self).__init__(value, AnyDistribution, GaussianDistribution, PoissonDistribution, UniformDistribution, Extension, )
# # XMLBehaviors # supermod.TargetValueStat.subclass = TargetValueStat # end class TargetValueStat
[docs]class PairCounts(supermod.PairCounts): def __init__(self, value=None, Extension=None, TargetValueCounts=None): super(PairCounts, self).__init__(value, Extension, TargetValueCounts, )
# # XMLBehaviors # supermod.PairCounts.subclass = PairCounts # end class PairCounts
[docs]class TargetValueCounts(supermod.TargetValueCounts): """ TargetValueCounts lists the counts associated with each value of the target field. """ def __init__(self, Extension=None, TargetValueCount=None): super(TargetValueCounts, self).__init__(Extension, TargetValueCount, )
# # XMLBehaviors # supermod.TargetValueCounts.subclass = TargetValueCounts # end class TargetValueCounts
[docs]class TargetValueCount(supermod.TargetValueCount): def __init__(self, value=None, count=None, Extension=None): super(TargetValueCount, self).__init__(value, count, Extension, )
# # XMLBehaviors # supermod.TargetValueCount.subclass = TargetValueCount # end class TargetValueCount
[docs]class NeuralNetwork(supermod.NeuralNetwork): """ this Defines the structure of Neural Networks Parameters ---------- modelName : this stores Name of the Model functionName : Stores what type of problems it is ex classification or regression algorithmName : Stores algorithm name used in the model activationFunction: Stores the activation function used in building model isScorable: indicates whether the model is valid for scoring MiningSchema : Stores the MiningField in building PMML Output : Stores the output field NeuralInputs: defines how input fields are normalized so that the values can be processed in the neural network NeuralLayer : defines how neurons are stroed in different NeuralLayer NeuralOutputs : defines how the output of the neural network must be interpreted """ def __init__(self, modelName=None, functionName=None, algorithmName=None, activationFunction=None, normalizationMethod='none', threshold='0', width=None, altitude='1.0', numberOfLayers=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, NeuralInputs=None, NeuralLayer=None, NeuralOutputs=None, ModelVerification=None, Extension=None): super(NeuralNetwork, self).__init__(modelName, functionName, algorithmName, activationFunction, normalizationMethod, threshold, width, altitude, numberOfLayers, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, NeuralInputs, NeuralLayer, NeuralOutputs, ModelVerification, Extension, ) # # XMLBehaviors #
[docs] def set_NeuralLayer(self, NeuralLayer, *args): self.NeuralLayer = NeuralLayer self.numberOfLayers = len(self.NeuralLayer)
[docs] def set_NeuralLayer_wrapper(self, NeuralLayer, *args): result = self.set_NeuralLayer(NeuralLayer, *args) return result
[docs] def add_NeuralLayer(self, value, *args): self.NeuralLayer.append(value) self.numberOfLayers = len(self.NeuralLayer)
[docs] def add_NeuralLayer_wrapper(self, value, *args): result = self.add_NeuralLayer(value, *args) return result
[docs] def insert_NeuralLayer_at(self, index, value, *args): self.NeuralLayer.insert(index, value) self.numberOfLayers = len(self.NeuralLayer)
[docs] def insert_NeuralLayer_at_wrapper(self, index, value, *args): result = self.insert_NeuralLayer_at(index, value, *args) return result
supermod.NeuralNetwork.subclass = NeuralNetwork # end class NeuralNetwork
[docs]class NeuralInputs(supermod.NeuralInputs): """ NeuralInputs Defines all the NeuralInput in InputLayer Parameters ---------- numberOfInputs: Describe the number of input features used in Model building NeuralInput: Defines how input fields are normalized so that the values can be processed in the neural network """ def __init__(self, numberOfInputs=None, Extension=None, NeuralInput=None): super(NeuralInputs, self).__init__(numberOfInputs, Extension, NeuralInput, ) # # XMLBehaviors #
[docs] def set_NeuralInput(self, NeuralInput, *args): self.NeuralInput = NeuralInput self.numberOfInputs = len(NeuralInput)
[docs] def set_NeuralInput_wrapper(self, NeuralInput, *args): result = self.set_NeuralInput(NeuralInput, *args) return result
[docs] def add_NeuralInput(self, value, *args): self.NeuralInput.append(value) self.numberOfInputs = len(self.NeuralInput)
[docs] def add_NeuralInput_wrapper(self, value, *args): result = self.add_NeuralInput(value, *args) return result
[docs] def insert_NeuralInput_at(self, index, value, *args): self.NeuralInput.insert(index, value) self.numberOfInputs = len(self.NeuralInput)
[docs] def insert_NeuralInput_at_wrapper(self, index, value, *args): result = self.insert_NeuralInput_at(index, value, *args) return result
supermod.NeuralInputs.subclass = NeuralInputs # end class NeuralInputs
[docs]class NeuralLayer(supermod.NeuralLayer): """ Neural Layer describe the Structure of the neural network i.e. number of layer and number of neuron in each layer etc. Parameters ---------- numberOfNeurons : Indicate the number of neurons in each Layer activationFunction : Used to determine the output of neural network like yes or no. It maps the resulting values in between 0 to 1 or -1 to 1 etc. threshold : In threshold function, any values above or equal to a given threshold are converted to 1, while anything falling below it is converted to a 0 during activation. width : a positive number describing the width for the radial basis function unit stored either in Neuron element or in NeuralLayer or even in NeuralNetwork. altitude : a positive number stored in Neuron or NeuralLayer or NeuralNetwork. The default is altitude="1.0", for that value the activation function reduces to the simple exp(-Z) Neuron : All incoming connections for a certain neuron are contained in the corresponding Neuron element """ def __init__(self, numberOfNeurons=None, activationFunction=None, threshold=None, width=None, altitude=None, normalizationMethod=None, Extension=None, Neuron=None): super(NeuralLayer, self).__init__(numberOfNeurons, activationFunction, threshold, width, altitude, normalizationMethod, Extension, Neuron, ) # # XMLBehaviors #
[docs] def set_Neuron(self, Neuron, *args): self.Neuron = Neuron self.numberOfNeurons = len(self.Neuron)
[docs] def set_Neuron_wrapper(self, Neuron, *args): result = self.set_Neuron(Neuron, *args) return result
[docs] def add_Neuron(self, value, *args): self.Neuron.append(value) self.numberOfNeurons = len(self.Neuron)
[docs] def add_Neuron_wrapper(self, value, *args): result = self.add_Neuron(value, *args) return result
[docs] def insert_Neuron_at(self, index, value, *args): self.Neuron.insert(index, value) self.numberOfNeurons = len(self.Neuron)
[docs] def insert_Neuron_at_wrapper(self, index, value, *args): result = self.insert_Neuron_at(index, value, *args) return result
supermod.NeuralLayer.subclass = NeuralLayer # end class NeuralLayer
[docs]class NeuralOutputs(supermod.NeuralOutputs): """ It stores the whole part of output layer Parameters ---------- numberOfOutputs : Represents the number of output neurons in Output layer NeuralOutput : defines how the output of the neural network must be interpreted """ def __init__(self, numberOfOutputs=None, Extension=None, NeuralOutput=None): super(NeuralOutputs, self).__init__(numberOfOutputs, Extension, NeuralOutput, ) # # XMLBehaviors #
[docs] def set_NeuralOutput(self, NeuralOutput, *args): self.Neuron = Neuron self.numberOfNeurons = len(self.Neuron)
[docs] def set_NeuralOutput_wrapper(self, NeuralOutput, *args): result = self.set_NeuralOutput(NeuralOutput, *args) return result
[docs] def add_NeuralOutput(self, value, *args): self.NeuralOutput.append(value) self.numberOfOutputs = len(self.NeuralOutput)
[docs] def add_NeuralOutput_wrapper(self, value, *args): result = self.add_NeuralOutput(value, *args) return result
[docs] def insert_NeuralOutput_at(self, index, value, *args): self.NeuralOutput.insert(index, value) self.numberOfOutputs = len(self.NeuralOutput)
[docs] def insert_NeuralOutput_at_wrapper(self, index, value, *args): result = self.insert_NeuralOutput_at(index, value, *args) return result
supermod.NeuralOutputs.subclass = NeuralOutputs # end class NeuralOutputs
[docs]class NeuralInput(supermod.NeuralInput): """ defines how input fields are normalized so that the values can be processed in the neural network Parameters ---------- id : an identifier id which must be unique in all layers DerivedField: which provides a common element for the various mappings. They can also appear at several places in the definition of specific models such as neural network or Naïve Bayes models """ def __init__(self, id=None, Extension=None, DerivedField=None): super(NeuralInput, self).__init__(id, Extension, DerivedField, )
# # XMLBehaviors # supermod.NeuralInput.subclass = NeuralInput # end class NeuralInput
[docs]class Neuron(supermod.Neuron): """ All incoming connections for a certain neuron are contained in the corresponding Neuron element Parameters ---------- id : an identifier which must be unique in all layers bias : The attribute bias implicitly defines a connection to a bias unit where the unit's value is 1.0 and the weight is the value of bias width : a positive number describing the width for the radial basis function unit stored either in Neuron element or in NeuralLayer or even in NeuralNetwork altitude : a positive number stored in Neuron or NeuralLayer or NeuralNetwork Con : Each connection Con of the element Neuron stores the ID of a node it comes from and the weight """ def __init__(self, id=None, bias=None, width=None, altitude=None, Extension=None, Con=None): super(Neuron, self).__init__(id, bias, width, altitude, Extension, Con, )
# # XMLBehaviors # supermod.Neuron.subclass = Neuron # end class Neuron
[docs]class Con(supermod.Con): def __init__(self, from_=None, weight=None, Extension=None): super(Con, self).__init__(from_, weight, Extension, )
# # XMLBehaviors # supermod.Con.subclass = Con # end class Con
[docs]class NeuralOutput(supermod.NeuralOutput): """ defines how the output of the neural network must be interpreted Parameters ---------- outputNeuron : It represent the id's of output neuron in output layer DerivedField : which provides a common element for the various mappings. They can also appear at several places in the definition of specific models such as neural network or Naïve Bayes models """ def __init__(self, outputNeuron=None, Extension=None, DerivedField=None): super(NeuralOutput, self).__init__(outputNeuron, Extension, DerivedField, )
# # XMLBehaviors # supermod.NeuralOutput.subclass = NeuralOutput # end class NeuralOutput
[docs]class Output(supermod.Output): """ describes a set of result values that can be returned from a model. Parameters ---------- OutputField : OutputField elements specify names, types and rules for calculating specific result features """ def __init__(self, Extension=None, OutputField=None): super(Output, self).__init__(Extension, OutputField, )
# # XMLBehaviors # supermod.Output.subclass = Output # end class Output
[docs]class OutputField(supermod.OutputField): """ elements specify names, types and rules for calculating specific result features. Parameters ---------- name : specifies the name of a the OutputField optype : indicate admissible operations on the values. A clusterId field, for example, can have integer as its dataType, but categorical as its opType. For details, see the description of DataDictionary dataType: specifies the data type for the output column targetField : must refer either to a MiningField of usage type target or a field described in Targets element feature : specifies the value the output field takes from the computed mining result value : used in conjunction with result features referring to specific values ruleFeature : specifies which feature of an association rule to return algorithm : specifies which scoring algorithm to use when computing the output value rank : specify the rank of the feature value from the mining result that should be selected rankBasis : specify which criterion is used to sort the output result rankOrder : determines the sorting order when ranking the results. The default behavior (rankOrder="descending") indicates that the result with the highest rank will appear first on the sorted list isMultiValued : indicates that the output can represent multiple output values segmentId : applicable to MiningModels which utilize Segmentation isFinalResult : indicate whether the result should be returned to the user or is only used as input to another OutputField that descrbes a transformed value Decisions : Derive a decision from the output of a data mining model FieldRef : Field references are simply pass-throughs to fields previously defined in the DataDictionary, a DerivedField, or a result field Constant : used in expressions which have multiple arguments. The actual value of a constant is given by the content of the element NormContinuous : defines how to normalize an input field by piecewise linear interpolation NormDiscrete : refer to a certain input field define a fan-out function which maps a single input field to a set of normalized fields Discretize : Discretization of numerical input fields is a mapping from continuous to discrete values using intervals MapValues : element can be used to create missing value indicators for categorical variables TextIndex : TextIndex expression to extract frequency information from the text input field, for a given term. The TextIndex element fully configures how the text input should be indexed, including case sensitivity, normalization and other settings Aggregate : summarize or collect groups of values, e.g., compute average Lag : defined as the value of the given input field a fixed number of records prior to the current one,If the desired value is not present, for a given record, the lag will be set to missing """ def __init__(self, name=None, displayName=None, optype=None, dataType=None, targetField=None, feature='predictedValue', value=None, numTopCategories=None, ruleFeature='consequent', algorithm='exclusiveRecommendation', rank='1', rankBasis='confidence', rankOrder='descending', isMultiValued='0', segmentId=None, isFinalResult=True, Extension=None, Decisions=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(OutputField, self).__init__(name, displayName, optype, dataType, targetField, feature, value, numTopCategories, ruleFeature, algorithm, rank, rankBasis, rankOrder, isMultiValued, segmentId, isFinalResult, Extension, Decisions, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, ) # # XMLBehaviors #
[docs] def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='OutputFields', *args): if self.name is not None and 'name' not in already_processed: already_processed.add('name') outfile.write(' name=%s' % (supermod.quote_attrib(self.name), )) if self.displayName is not None and 'displayName' not in already_processed: already_processed.add('displayName') outfile.write(' displayName=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.displayName), input_name='displayName')), )) if self.optype is not None and 'optype' not in already_processed: already_processed.add('optype') outfile.write(' optype=%s' % (supermod.quote_attrib(self.optype), )) if self.dataType is not None and 'dataType' not in already_processed: already_processed.add('dataType') outfile.write(' dataType=%s' % (supermod.quote_attrib(self.dataType), )) if self.targetField is not None and 'targetField' not in already_processed: already_processed.add('targetField') outfile.write(' targetField=%s' % (supermod.quote_attrib(self.targetField), )) if self.feature is not None and 'feature' not in already_processed: already_processed.add('feature') outfile.write(' feature=%s' % (supermod.quote_attrib(self.feature), )) if self.value is not None and 'value' not in already_processed: already_processed.add('value') outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.value), input_name='value')), )) if self.ruleFeature != "consequent" and 'ruleFeature' not in already_processed: already_processed.add('ruleFeature') outfile.write(' ruleFeature=%s' % (supermod.quote_attrib(self.ruleFeature), )) if self.algorithm != "exclusiveRecommendation" and 'algorithm' not in already_processed: already_processed.add('algorithm') outfile.write(' algorithm=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.algorithm), input_name='algorithm')), )) # if self.rank is not None and 'rank' not in already_processed: # already_processed.add('rank') # outfile.write(' rank=%s' % (supermod.quote_attrib(self.rank), )) if self.rankBasis != "confidence" and 'rankBasis' not in already_processed: already_processed.add('rankBasis') outfile.write(' rankBasis=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.rankBasis), input_name='rankBasis')), )) if self.rankOrder != "descending" and 'rankOrder' not in already_processed: already_processed.add('rankOrder') outfile.write(' rankOrder=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.rankOrder), input_name='rankOrder')), )) if self.isMultiValued != "0" and 'isMultiValued' not in already_processed: already_processed.add('isMultiValued') outfile.write(' isMultiValued=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.isMultiValued), input_name='isMultiValued')), )) if self.segmentId is not None and 'segmentId' not in already_processed: already_processed.add('segmentId') outfile.write(' segmentId=%s' % (self.gds_encode(self.gds_format_string(supermod.quote_attrib(self.segmentId), input_name='segmentId')), )) if not self.isFinalResult and 'isFinalResult' not in already_processed: already_processed.add('isFinalResult') outfile.write(' isFinalResult="%s"' % self.gds_format_boolean(self.isFinalResult, input_name='isFinalResult')) if self.numTopCategories is not None and 'numTopCategories' not in already_processed: already_processed.add('numTopCategories') outfile.write(' numTopCategories=%s' % (supermod.quote_attrib(self.numTopCategories), ))
[docs] def exportAttributes_wrapper(self, outfile, level, already_processed, namespace_='', name_='OutputFields', *args): result = self.exportAttributes(outfile, level, already_processed, namespace_='', name_='OutputFields', *args) return result
supermod.OutputField.subclass = OutputField # end class OutputField
[docs]class Decisions(supermod.Decisions): def __init__(self, businessProblem=None, description=None, Extension=None, Decision=None): super(Decisions, self).__init__(businessProblem, description, Extension, Decision, )
# # XMLBehaviors # supermod.Decisions.subclass = Decisions # end class Decisions
[docs]class Decision(supermod.Decision): """ Derive a decision from the output of a data mining model. For this result feature, OutputField must contain an EXPRESSION, unless it is used to refer to a decision of segment model through the segmentID attribute. """ def __init__(self, value=None, displayValue=None, description=None, Extension=None): super(Decision, self).__init__(value, displayValue, description, Extension, )
# # XMLBehaviors # supermod.Decision.subclass = Decision # end class Decision
[docs]class RegressionModel(supermod.RegressionModel): """ The root element of an XML regression model. Each instance of a regression model must start with this element Parameters ---------- modelName : This is a unique identifier specifying the name of the regression model functionName : Can be regression or classification algorithmName : Can be any string describing the algorithm that was used while creating the model modelType: Specifies the type of a regression model. The attribute modelType is for information only. It has been changed to optional and the usage is deprecated. Use functionName and normalizationMethod in order to define the computation. Use algorithmName in order to give further optional information targetFieldName : The name of the target field (also called dependent variable). The attribute targetFieldName is for information only. It has been changed to optional and the usage is deprecated. Use usageType="target" in MiningField instead isScorable : This attribute indicates if the model is valid for scoring. If this attribute is true or if it is missing, then the model should be processed normally. However, if the attribute is false, then the model producer has indicated that this model is intended for information purposes only and should not be used to generate results. In order to be valid PMML, all required elements and attributes must be present, even for non-scoring models RegressionTable : A table that lists the values of all predictors or independent variables. If the model is used to predict a numerical field, then there is only one RegressionTable and the attribute targetCategory may be missing. If the model is used to predict a categorical field, then there are two or more RegressionTables and each one must have the attribute targetCategory defined with a unique value """ def __init__(self, modelName=None, functionName=None, algorithmName=None, modelType=None, targetFieldName=None, normalizationMethod='none', isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, RegressionTable=None, ModelVerification=None, Extension=None): super(RegressionModel, self).__init__(modelName, functionName, algorithmName, modelType, targetFieldName, normalizationMethod, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, RegressionTable, ModelVerification, Extension, )
# # XMLBehaviors # supermod.RegressionModel.subclass = RegressionModel # end class RegressionModel
[docs]class RegressionTable(supermod.RegressionTable): """ A table that lists the values of all predictors or independent variables. If the model is used to predict a numerical field, then there is only one RegressionTable and the attribute targetCategory may be missing. If the model is used to predict a categorical field, then there are two or more RegressionTables and each one must have the attribute targetCategory defined with a unique value Parameters ---------- NumericPredictor : Defines a numeric independent variable. The list of valid attributes comprises the name of the variable, the exponent to be used, and the coefficient by which the values of this variable must be multiplied. Note that the exponent defaults to 1, hence it is not always necessary to specify. Also, if the input value is missing, the result evaluates to a missing value CategoricalPredictor : Defines a categorical independent variable. The list of attributes comprises the name of the variable, the value attribute, and the coefficient by which the values of this variable must be multiplied. To do a regression analysis with categorical values, some means must be applied to enable calculations. If the specified value of an independent value occurs, the term variable_name(value) is replaced with 1. Thus the coefficient is multiplied by 1. If the value does not occur, the term variable_name(value) is replaced with 0 so that the product coefficient × variable_name(value) yields 0. Consequently, the product is ignored in the ongoing analysis. If the input value is missing then variable_name(v) yields 0 for any v PredictorTerm : Contains one or more fields that are combined by multiplication. That is, this element supports interaction terms. The type of all fields referenced within PredictorTerm must be continuous. Note that if the input value is missing, the result evaluates to a missing value. The name attribute allows this term to be referenced by elements of Statistics and should be unique from any other field names with the scope of this RegressionModel The content of PredictorTerm might be extended to a sequence of any expression. This feature is not yet needed """ def __init__(self, intercept=None, targetCategory=None, Extension=None, NumericPredictor=None, CategoricalPredictor=None, PredictorTerm=None): super(RegressionTable, self).__init__(intercept, targetCategory, Extension, NumericPredictor, CategoricalPredictor, PredictorTerm, )
# # XMLBehaviors # supermod.RegressionTable.subclass = RegressionTable # end class RegressionTable
[docs]class NumericPredictor(supermod.NumericPredictor): """ Defines a numeric independent variable. The list of valid attributes comprises the name of the variable, the exponent to be used, and the coefficient by which the values of this variable must be multiplied. Note that the exponent defaults to 1, hence it is not always necessary to specify. Also, if the input value is missing, the result evaluates to a missing value. """ def __init__(self, name=None, exponent='1', coefficient=None, Extension=None): super(NumericPredictor, self).__init__(name, exponent, coefficient, Extension, )
# # XMLBehaviors # supermod.NumericPredictor.subclass = NumericPredictor # end class NumericPredictor
[docs]class CategoricalPredictor(supermod.CategoricalPredictor): def __init__(self, name=None, value=None, coefficient=None, Extension=None): super(CategoricalPredictor, self).__init__(name, value, coefficient, Extension, )
# # XMLBehaviors # supermod.CategoricalPredictor.subclass = CategoricalPredictor # end class CategoricalPredictor
[docs]class PredictorTerm(supermod.PredictorTerm): """ Contains one or more fields that are combined by multiplication. That is, this element supports interaction terms. The type of all fields referenced within PredictorTerm must be continuous. Note that if the input value is missing, the result evaluates to a missing value. The name attribute allows this term to be referenced by elements of Statistics and should be unique from any other field names with the scope of this RegressionModel. The content of PredictorTerm might be extended to a sequence of any expression. This feature is not yet needed. """ def __init__(self, name=None, coefficient=None, Extension=None, FieldRef=None): super(PredictorTerm, self).__init__(name, coefficient, Extension, FieldRef, )
# # XMLBehaviors # supermod.PredictorTerm.subclass = PredictorTerm # end class PredictorTerm
[docs]class RuleSetModel(supermod.RuleSetModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, RuleSet=None, ModelVerification=None, Extension=None): super(RuleSetModel, self).__init__(modelName, functionName, algorithmName, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, RuleSet, ModelVerification, Extension, )
# # XMLBehaviors # supermod.RuleSetModel.subclass = RuleSetModel # end class RuleSetModel
[docs]class RuleSet(supermod.RuleSet): def __init__(self, recordCount=None, nbCorrect=None, defaultScore=None, defaultConfidence=None, Extension=None, RuleSelectionMethod=None, ScoreDistribution=None, SimpleRule=None, CompoundRule=None): super(RuleSet, self).__init__(recordCount, nbCorrect, defaultScore, defaultConfidence, Extension, RuleSelectionMethod, ScoreDistribution, SimpleRule, CompoundRule, )
# # XMLBehaviors # supermod.RuleSet.subclass = RuleSet # end class RuleSet
[docs]class RuleSelectionMethod(supermod.RuleSelectionMethod): def __init__(self, criterion=None, Extension=None): super(RuleSelectionMethod, self).__init__(criterion, Extension, )
# # XMLBehaviors # supermod.RuleSelectionMethod.subclass = RuleSelectionMethod # end class RuleSelectionMethod
[docs]class SimpleRule(supermod.SimpleRule): def __init__(self, id=None, score=None, recordCount=None, nbCorrect=None, confidence='1', weight='1', Extension=None, SimplePredicate=None, CompoundPredicate=None, SimpleSetPredicate=None, True_=None, False_=None, ScoreDistribution=None): super(SimpleRule, self).__init__(id, score, recordCount, nbCorrect, confidence, weight, Extension, SimplePredicate, CompoundPredicate, SimpleSetPredicate, True_, False_, ScoreDistribution, )
# # XMLBehaviors # supermod.SimpleRule.subclass = SimpleRule # end class SimpleRule
[docs]class CompoundRule(supermod.CompoundRule): def __init__(self, Extension=None, SimplePredicate=None, CompoundPredicate=None, SimpleSetPredicate=None, True_=None, False_=None, SimpleRule=None, CompoundRule_member=None): super(CompoundRule, self).__init__(Extension, SimplePredicate, CompoundPredicate, SimpleSetPredicate, True_, False_, SimpleRule, CompoundRule_member, )
# # XMLBehaviors # supermod.CompoundRule.subclass = CompoundRule # end class CompoundRule
[docs]class Scorecard(supermod.Scorecard): def __init__(self, modelName=None, functionName=None, algorithmName=None, initialScore='0', useReasonCodes=True, reasonCodeAlgorithm='pointsBelow', baselineScore=None, baselineMethod='other', isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, Characteristics=None, ModelVerification=None, Extension=None): super(Scorecard, self).__init__(modelName, functionName, algorithmName, initialScore, useReasonCodes, reasonCodeAlgorithm, baselineScore, baselineMethod, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, Characteristics, ModelVerification, Extension, )
# # XMLBehaviors # supermod.Scorecard.subclass = Scorecard # end class Scorecard
[docs]class Characteristics(supermod.Characteristics): def __init__(self, Extension=None, Characteristic=None): super(Characteristics, self).__init__(Extension, Characteristic, )
# # XMLBehaviors # supermod.Characteristics.subclass = Characteristics # end class Characteristics
[docs]class Characteristic(supermod.Characteristic): def __init__(self, name=None, reasonCode=None, baselineScore=None, Extension=None, Attribute=None): super(Characteristic, self).__init__(name, reasonCode, baselineScore, Extension, Attribute, )
# # XMLBehaviors # supermod.Characteristic.subclass = Characteristic # end class Characteristic
[docs]class Attribute(supermod.Attribute): def __init__(self, reasonCode=None, partialScore=None, Extension=None, SimplePredicate=None, CompoundPredicate=None, SimpleSetPredicate=None, True_=None, False_=None, ComplexPartialScore=None): super(Attribute, self).__init__(reasonCode, partialScore, Extension, SimplePredicate, CompoundPredicate, SimpleSetPredicate, True_, False_, ComplexPartialScore, )
# # XMLBehaviors # supermod.Attribute.subclass = Attribute # end class Attribute
[docs]class ComplexPartialScore(supermod.ComplexPartialScore): def __init__(self, Extension=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None): super(ComplexPartialScore, self).__init__(Extension, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, )
# # XMLBehaviors # supermod.ComplexPartialScore.subclass = ComplexPartialScore # end class ComplexPartialScore
[docs]class SequenceModel(supermod.SequenceModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, numberOfTransactions=None, maxNumberOfItemsPerTransaction=None, avgNumberOfItemsPerTransaction=None, numberOfTransactionGroups=None, maxNumberOfTAsPerTAGroup=None, avgNumberOfTAsPerTAGroup=None, isScorable=True, MiningSchema=None, ModelStats=None, LocalTransformations=None, Constraints=None, Item=None, Itemset=None, SetPredicate=None, Sequence=None, SequenceRule=None, Extension=None): super(SequenceModel, self).__init__(modelName, functionName, algorithmName, numberOfTransactions, maxNumberOfItemsPerTransaction, avgNumberOfItemsPerTransaction, numberOfTransactionGroups, maxNumberOfTAsPerTAGroup, avgNumberOfTAsPerTAGroup, isScorable, MiningSchema, ModelStats, LocalTransformations, Constraints, Item, Itemset, SetPredicate, Sequence, SequenceRule, Extension, )
# # XMLBehaviors # supermod.SequenceModel.subclass = SequenceModel # end class SequenceModel
[docs]class Constraints(supermod.Constraints): def __init__(self, minimumNumberOfItems='1', maximumNumberOfItems=None, minimumNumberOfAntecedentItems='1', maximumNumberOfAntecedentItems=None, minimumNumberOfConsequentItems='1', maximumNumberOfConsequentItems=None, minimumSupport='0', minimumConfidence='0', minimumLift='0', minimumTotalSequenceTime='0', maximumTotalSequenceTime=None, minimumItemsetSeparationTime='0', maximumItemsetSeparationTime=None, minimumAntConsSeparationTime='0', maximumAntConsSeparationTime=None, Extension=None): super(Constraints, self).__init__(minimumNumberOfItems, maximumNumberOfItems, minimumNumberOfAntecedentItems, maximumNumberOfAntecedentItems, minimumNumberOfConsequentItems, maximumNumberOfConsequentItems, minimumSupport, minimumConfidence, minimumLift, minimumTotalSequenceTime, maximumTotalSequenceTime, minimumItemsetSeparationTime, maximumItemsetSeparationTime, minimumAntConsSeparationTime, maximumAntConsSeparationTime, Extension, )
# # XMLBehaviors # supermod.Constraints.subclass = Constraints # end class Constraints
[docs]class SetPredicate(supermod.SetPredicate): def __init__(self, id=None, field=None, operator=None, Extension=None, Array=None): super(SetPredicate, self).__init__(id, field, operator, Extension, Array, )
# # XMLBehaviors # supermod.SetPredicate.subclass = SetPredicate # end class SetPredicate
[docs]class Delimiter(supermod.Delimiter): def __init__(self, delimiter=None, gap=None, Extension=None): super(Delimiter, self).__init__(delimiter, gap, Extension, )
# # XMLBehaviors # supermod.Delimiter.subclass = Delimiter # end class Delimiter
[docs]class Time(supermod.Time): def __init__(self, min=None, max=None, mean=None, standardDeviation=None, Extension=None): super(Time, self).__init__(min, max, mean, standardDeviation, Extension, )
# # XMLBehaviors # supermod.Time.subclass = Time # end class Time
[docs]class Sequence(supermod.Sequence): def __init__(self, id=None, numberOfSets=None, occurrence=None, support=None, Extension=None, Delimiter=None, SetReference=None, Time=None): super(Sequence, self).__init__(id, numberOfSets, occurrence, support, Extension, Delimiter, SetReference, Time, )
# # XMLBehaviors # supermod.Sequence.subclass = Sequence # end class Sequence
[docs]class SetReference(supermod.SetReference): def __init__(self, setId=None, Extension=None): super(SetReference, self).__init__(setId, Extension, )
# # XMLBehaviors # supermod.SetReference.subclass = SetReference # end class SetReference
[docs]class SequenceRule(supermod.SequenceRule): def __init__(self, id=None, numberOfSets=None, occurrence=None, support=None, confidence=None, lift=None, Extension=None, AntecedentSequence=None, Delimiter=None, ConsequentSequence=None, Time=None): super(SequenceRule, self).__init__(id, numberOfSets, occurrence, support, confidence, lift, Extension, AntecedentSequence, Delimiter, ConsequentSequence, Time, )
# # XMLBehaviors # supermod.SequenceRule.subclass = SequenceRule # end class SequenceRule
[docs]class SequenceReference(supermod.SequenceReference): def __init__(self, seqId=None, Extension=None): super(SequenceReference, self).__init__(seqId, Extension, )
# # XMLBehaviors # supermod.SequenceReference.subclass = SequenceReference # end class SequenceReference
[docs]class AntecedentSequence(supermod.AntecedentSequence): def __init__(self, Extension=None, SequenceReference=None, Time=None): super(AntecedentSequence, self).__init__(Extension, SequenceReference, Time, )
# # XMLBehaviors # supermod.AntecedentSequence.subclass = AntecedentSequence # end class AntecedentSequence
[docs]class ConsequentSequence(supermod.ConsequentSequence): def __init__(self, Extension=None, SequenceReference=None, Time=None): super(ConsequentSequence, self).__init__(Extension, SequenceReference, Time, )
# # XMLBehaviors # supermod.ConsequentSequence.subclass = ConsequentSequence # end class ConsequentSequence
[docs]class ModelStats(supermod.ModelStats): def __init__(self, Extension=None, UnivariateStats=None, MultivariateStats=None): super(ModelStats, self).__init__(Extension, UnivariateStats, MultivariateStats, )
# # XMLBehaviors # supermod.ModelStats.subclass = ModelStats # end class ModelStats
[docs]class UnivariateStats(supermod.UnivariateStats): def __init__(self, field=None, weighted='0', Extension=None, Counts=None, NumericInfo=None, DiscrStats=None, ContStats=None, Anova=None): super(UnivariateStats, self).__init__(field, weighted, Extension, Counts, NumericInfo, DiscrStats, ContStats, Anova, )
# # XMLBehaviors # supermod.UnivariateStats.subclass = UnivariateStats # end class UnivariateStats
[docs]class Counts(supermod.Counts): def __init__(self, totalFreq=None, missingFreq=None, invalidFreq=None, cardinality=None, Extension=None): super(Counts, self).__init__(totalFreq, missingFreq, invalidFreq, cardinality, Extension, )
# # XMLBehaviors # supermod.Counts.subclass = Counts # end class Counts
[docs]class NumericInfo(supermod.NumericInfo): def __init__(self, minimum=None, maximum=None, mean=None, standardDeviation=None, median=None, interQuartileRange=None, Extension=None, Quantile=None): super(NumericInfo, self).__init__(minimum, maximum, mean, standardDeviation, median, interQuartileRange, Extension, Quantile, )
# # XMLBehaviors # supermod.NumericInfo.subclass = NumericInfo # end class NumericInfo
[docs]class Quantile(supermod.Quantile): def __init__(self, quantileLimit=None, quantileValue=None, Extension=None): super(Quantile, self).__init__(quantileLimit, quantileValue, Extension, )
# # XMLBehaviors # supermod.Quantile.subclass = Quantile # end class Quantile
[docs]class DiscrStats(supermod.DiscrStats): def __init__(self, modalValue=None, Extension=None, Array=None): super(DiscrStats, self).__init__(modalValue, Extension, Array, )
# # XMLBehaviors # supermod.DiscrStats.subclass = DiscrStats # end class DiscrStats
[docs]class ContStats(supermod.ContStats): def __init__(self, totalValuesSum=None, totalSquaresSum=None, Extension=None, Interval=None, NUM_ARRAY=None): super(ContStats, self).__init__(totalValuesSum, totalSquaresSum, Extension, Interval, NUM_ARRAY, )
# # XMLBehaviors # supermod.ContStats.subclass = ContStats # end class ContStats
[docs]class MultivariateStats(supermod.MultivariateStats): def __init__(self, targetCategory=None, Extension=None, MultivariateStat=None): super(MultivariateStats, self).__init__(targetCategory, Extension, MultivariateStat, )
# # XMLBehaviors # supermod.MultivariateStats.subclass = MultivariateStats # end class MultivariateStats
[docs]class MultivariateStat(supermod.MultivariateStat): def __init__(self, name=None, category=None, exponent='1', isIntercept=False, importance=None, stdError=None, tValue=None, chiSquareValue=None, fStatistic=None, dF=None, pValueAlpha=None, pValueInitial=None, pValueFinal=None, confidenceLevel='0.95', confidenceLowerBound=None, confidenceUpperBound=None, Extension=None): super(MultivariateStat, self).__init__(name, category, exponent, isIntercept, importance, stdError, tValue, chiSquareValue, fStatistic, dF, pValueAlpha, pValueInitial, pValueFinal, confidenceLevel, confidenceLowerBound, confidenceUpperBound, Extension, )
# # XMLBehaviors # supermod.MultivariateStat.subclass = MultivariateStat # end class MultivariateStat
[docs]class Anova(supermod.Anova): def __init__(self, target=None, Extension=None, AnovaRow=None): super(Anova, self).__init__(target, Extension, AnovaRow, )
# # XMLBehaviors # supermod.Anova.subclass = Anova # end class Anova
[docs]class AnovaRow(supermod.AnovaRow): def __init__(self, type_=None, sumOfSquares=None, degreesOfFreedom=None, meanOfSquares=None, fValue=None, pValue=None, Extension=None): super(AnovaRow, self).__init__(type_, sumOfSquares, degreesOfFreedom, meanOfSquares, fValue, pValue, Extension, )
# # XMLBehaviors # supermod.AnovaRow.subclass = AnovaRow # end class AnovaRow
[docs]class Partition(supermod.Partition): def __init__(self, name=None, size=None, Extension=None, PartitionFieldStats=None): super(Partition, self).__init__(name, size, Extension, PartitionFieldStats, )
# # XMLBehaviors # supermod.Partition.subclass = Partition # end class Partition
[docs]class PartitionFieldStats(supermod.PartitionFieldStats): def __init__(self, field=None, weighted='0', Extension=None, Counts=None, NumericInfo=None, Array=None): super(PartitionFieldStats, self).__init__(field, weighted, Extension, Counts, NumericInfo, Array, )
# # XMLBehaviors # supermod.PartitionFieldStats.subclass = PartitionFieldStats # end class PartitionFieldStats
[docs]class SupportVectorMachineModel(supermod.SupportVectorMachineModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, threshold='0', svmRepresentation='SupportVectors', classificationMethod='OneAgainstAll', maxWins=False, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, LinearKernelType=None, PolynomialKernelType=None, RadialBasisKernelType=None, SigmoidKernelType=None, VectorDictionary=None, SupportVectorMachine=None, ModelVerification=None, Extension=None): super(SupportVectorMachineModel, self).__init__(modelName, functionName, algorithmName, threshold, svmRepresentation, classificationMethod, maxWins, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, LinearKernelType, PolynomialKernelType, RadialBasisKernelType, SigmoidKernelType, VectorDictionary, SupportVectorMachine, ModelVerification, Extension, )
# # XMLBehaviors # supermod.SupportVectorMachineModel.subclass = SupportVectorMachineModel # end class SupportVectorMachineModel
[docs]class LinearKernelType(supermod.LinearKernelType): def __init__(self, description=None, Extension=None): super(LinearKernelType, self).__init__(description, Extension, )
# # XMLBehaviors # supermod.LinearKernelType.subclass = LinearKernelType # end class LinearKernelType
[docs]class PolynomialKernelType(supermod.PolynomialKernelType): def __init__(self, description=None, gamma='1', coef0='1', degree='1', Extension=None): super(PolynomialKernelType, self).__init__(description, gamma, coef0, degree, Extension, )
# # XMLBehaviors # supermod.PolynomialKernelType.subclass = PolynomialKernelType # end class PolynomialKernelType
[docs]class RadialBasisKernelType(supermod.RadialBasisKernelType): def __init__(self, description=None, gamma='1', Extension=None): super(RadialBasisKernelType, self).__init__(description, gamma, Extension, )
# # XMLBehaviors # supermod.RadialBasisKernelType.subclass = RadialBasisKernelType # end class RadialBasisKernelType
[docs]class SigmoidKernelType(supermod.SigmoidKernelType): def __init__(self, description=None, gamma='1', coef0='1', Extension=None): super(SigmoidKernelType, self).__init__(description, gamma, coef0, Extension, )
# # XMLBehaviors # supermod.SigmoidKernelType.subclass = SigmoidKernelType # end class SigmoidKernelType
[docs]class VectorDictionary(supermod.VectorDictionary): """ The VectorDictionary element holds all support vectors from all support vector machines Parameters ---------- numberOfVectors : The attribute numberOfVectors must be equal to the number of vectors contained in the dictionary VectorFields : VectorFields defines which entries in the vectors correspond to which fields. Note that categorical predictors are usually transformed into groups of dummy continuous variables, each having value 1 if a specific category appears in the case and 0 otherwise. Thus, one categorical field often corresponds to a group of entries in the vector VectorInstance : The elements VectorInstance represent support vectors and are referenced by the id attribute. They do not contain the value of the target mining field """ def __init__(self, numberOfVectors=None, Extension=None, VectorFields=None, VectorInstance=None): super(VectorDictionary, self).__init__(numberOfVectors, Extension, VectorFields, VectorInstance, ) # # XMLBehaviors #
[docs] def set_VectorInstance(self, VectorInstance, *args): self.VectorInstance = VectorInstance self.numberOfVectors = len(self.VectorInstance)
[docs] def set_VectorInstance_wrapper(self, VectorInstance, *args): result = self.set_VectorInstance(VectorInstance, *args) return result
[docs] def add_VectorInstance(self, value, *args): self.VectorInstance.append(value) self.numberOfVectors = len(self.VectorInstance)
[docs] def add_VectorInstance_wrapper(self, value, *args): result = self.add_VectorInstance(value, *args) return result
[docs] def insert_VectorInstance_at(self, index, value, *args): self.VectorInstance.insert(index, value) self.numberOfVectors = len(self.VectorInstance)
[docs] def insert_VectorInstance_at_wrapper(self, index, value, *args): result = self.insert_VectorInstance_at(index, value, *args) return result
supermod.VectorDictionary.subclass = VectorDictionary # end class VectorDictionary
[docs]class VectorFields(supermod.VectorFields): """ VectorFields defines which entries in the vectors correspond to which fields. Note that categorical predictors are usually transformed into groups of dummy continuous variables, each having value 1 if a specific category appears in the case and 0 otherwise. Thus, one categorical field often corresponds to a group of entries in the vector """ def __init__(self, numberOfFields=None, Extension=None, FieldRef=None, CategoricalPredictor=None): super(VectorFields, self).__init__(numberOfFields, Extension, FieldRef, CategoricalPredictor, )
# # XMLBehaviors # supermod.VectorFields.subclass = VectorFields # end class VectorFields
[docs]class VectorInstance(supermod.VectorInstance): """ The elements VectorInstance represent support vectors and are referenced by the id attribute. They do not contain the value of the target mining field. The VectorInstance is a data vector given in dense or sparse array format. The order of the values corresponds to that of the VectorFields. The sizes of the sparse arrays must match the number of fields included in the VectorFields element """ def __init__(self, id=None, Extension=None, REAL_SparseArray=None, Array=None): super(VectorInstance, self).__init__(id, Extension, REAL_SparseArray, Array, )
# # XMLBehaviors # supermod.VectorInstance.subclass = VectorInstance # end class VectorInstance
[docs]class SupportVectorMachine(supermod.SupportVectorMachine): """ The description of Support Vector Machine (SVM) models assumes some familiarity with the SVM theory. In this specification, Support Vector Machine models for classification and regression are considered. A Support Vector Machine is a function f which is defined in the space spanned by the kernel basis functions K(x,xi) of the support vectors xi Parameters ---------- targetCategory : The attribute targetCategory is required for classification models and gives the corresponding class label. This attribute is to be used for classification models implementing the one-against-all method. In this method, for n classes, there are exactly n SupportVectorMachine elements. Depending on the model attribute maxWins, the SVM with the largest or the smallest value determines the predicted class label alternateTargetCategory : The attribute alternateTargetCategory is required in case of binary classification models with only one SupportVectorMachine element. It is also required in case of multi-class classification models implementing the one-against-one method threshold : The attribute threshold defines a discrimination boundary to be used in case of binary classification or whenever attribute classificationMethod is defined as OneAgainstOne for multi-class classification tasks SupportVectors : The term Support Vector (SV) has also a geometrical interpretation because these vectors really support the discrimination function f(x) = 0 in the mechanical interpretation Coefficients : Each coefficient αi is described by the element Coefficient and the number of coefficients corresponds to that of the support vectors. Hence the attribute numberOfCoefficients is equal to the number of support vectors. The attribute absoluteValue contains the value of the absolute coefficient b """ def __init__(self, targetCategory=None, alternateTargetCategory=None, threshold=None, Extension=None, SupportVectors=None, Coefficients=None): super(SupportVectorMachine, self).__init__(targetCategory, alternateTargetCategory, threshold, Extension, SupportVectors, Coefficients, )
# # XMLBehaviors # supermod.SupportVectorMachine.subclass = SupportVectorMachine # end class SupportVectorMachine
[docs]class SupportVectors(supermod.SupportVectors): def __init__(self, numberOfSupportVectors=None, numberOfAttributes=None, Extension=None, SupportVector=None): super(SupportVectors, self).__init__(numberOfSupportVectors, numberOfAttributes, Extension, SupportVector, ) # # XMLBehaviors #
[docs] def set_SupportVector(self, SupportVector, *args): self.SupportVector = SupportVector self.numberOfVectors = len(self.SupportVector)
[docs] def set_SupportVector_wrapper(self, SupportVector, *args): result = self.set_SupportVector(SupportVector, *args) return result
[docs] def add_SupportVector(self, value, *args): self.SupportVector.append(value) self.numberOfVectors = len(self.SupportVector)
[docs] def add_SupportVector_wrapper(self, value, *args): result = self.add_SupportVector(value, *args) return result
[docs] def insert_SupportVector_at(self, index, value, *args): self.SupportVector.insert(index, value) self.numberOfVectors = len(self.SupportVector)
[docs] def insert_SupportVector_at_wrapper(self, index, value, *args): result = self.insert_SupportVector_at(index, value, *args) return result
supermod.SupportVectors.subclass = SupportVectors # end class SupportVectors
[docs]class SupportVector(supermod.SupportVector): def __init__(self, vectorId=None, Extension=None): super(SupportVector, self).__init__(vectorId, Extension, )
# # XMLBehaviors # supermod.SupportVector.subclass = SupportVector # end class SupportVector
[docs]class Coefficients(supermod.Coefficients): def __init__(self, numberOfCoefficients=None, absoluteValue='0', Extension=None, Coefficient=None): super(Coefficients, self).__init__(numberOfCoefficients, absoluteValue, Extension, Coefficient, ) # # XMLBehaviors #
[docs] def set_Coefficient(self, Coefficient, *args): self.Coefficient = Coefficient self.numberOfCoefficients = len(self.Coefficient)
[docs] def set_Coefficient_wrapper(self, Coefficient, *args): result = self.set_Coefficient(Coefficient, *args) return result
[docs] def add_Coefficient(self, value, *args): self.Coefficient.append(value) self.numberOfCoefficients = len(self.Coefficient)
[docs] def add_Coefficient_wrapper(self, value, *args): result = self.add_Coefficient(value, *args) return result
[docs] def insert_Coefficient_at(self, index, value, *args): self.Coefficient.insert(index, value) self.numberOfCoefficients = len(self.Coefficient)
[docs] def insert_Coefficient_at_wrapper(self, index, value, *args): result = self.insert_Coefficient_at(index, value, *args) return result
supermod.Coefficients.subclass = Coefficients # end class Coefficients
[docs]class Coefficient(supermod.Coefficient): def __init__(self, value='0', Extension=None): super(Coefficient, self).__init__(value, Extension, )
# # XMLBehaviors # supermod.Coefficient.subclass = Coefficient # end class Coefficient
[docs]class Targets(supermod.Targets): def __init__(self, Extension=None, Target=None): super(Targets, self).__init__(Extension, Target, )
# # XMLBehaviors # supermod.Targets.subclass = Targets # end class Targets
[docs]class Target(supermod.Target): def __init__(self, field=None, optype=None, castInteger=None, min=None, max=None, rescaleConstant=0, rescaleFactor=1, Extension=None, TargetValue=None): super(Target, self).__init__(field, optype, castInteger, min, max, rescaleConstant, rescaleFactor, Extension, TargetValue, )
# # XMLBehaviors # supermod.Target.subclass = Target # end class Target
[docs]class TargetValue(supermod.TargetValue): def __init__(self, value=None, displayValue=None, priorProbability=None, defaultValue=None, Extension=None, Partition=None): super(TargetValue, self).__init__(value, displayValue, priorProbability, defaultValue, Extension, Partition, )
# # XMLBehaviors # supermod.TargetValue.subclass = TargetValue # end class TargetValue
[docs]class Taxonomy(supermod.Taxonomy): def __init__(self, name=None, Extension=None, ChildParent=None): super(Taxonomy, self).__init__(name, Extension, ChildParent, )
# # XMLBehaviors # supermod.Taxonomy.subclass = Taxonomy # end class Taxonomy
[docs]class ChildParent(supermod.ChildParent): def __init__(self, childField=None, parentField=None, parentLevelField=None, isRecursive='no', Extension=None, FieldColumnPair=None, TableLocator=None, InlineTable=None): super(ChildParent, self).__init__(childField, parentField, parentLevelField, isRecursive, Extension, FieldColumnPair, TableLocator, InlineTable, )
# # XMLBehaviors # supermod.ChildParent.subclass = ChildParent # end class ChildParent
[docs]class TableLocator(supermod.TableLocator): def __init__(self, Extension=None): super(TableLocator, self).__init__(Extension, )
# # XMLBehaviors # supermod.TableLocator.subclass = TableLocator # end class TableLocator
[docs]class InlineTable(supermod.InlineTable): def __init__(self, Extension=None, row=None): super(InlineTable, self).__init__(Extension, row, )
# # XMLBehaviors # supermod.InlineTable.subclass = InlineTable # end class InlineTable
[docs]class row(supermod.row): def __init__(self, anytypeobjs_=None): super(row, self).__init__(anytypeobjs_, )
# # XMLBehaviors # supermod.row.subclass = row # end class row
[docs]class TextModel(supermod.TextModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, numberOfTerms=None, numberOfDocuments=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, TextDictionary=None, TextCorpus=None, DocumentTermMatrix=None, TextModelNormalization=None, TextModelSimiliarity=None, ModelVerification=None, Extension=None): super(TextModel, self).__init__(modelName, functionName, algorithmName, numberOfTerms, numberOfDocuments, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, TextDictionary, TextCorpus, DocumentTermMatrix, TextModelNormalization, TextModelSimiliarity, ModelVerification, Extension, )
# # XMLBehaviors # supermod.TextModel.subclass = TextModel # end class TextModel
[docs]class TextDictionary(supermod.TextDictionary): def __init__(self, Extension=None, Taxonomy=None, Array=None): super(TextDictionary, self).__init__(Extension, Taxonomy, Array, )
# # XMLBehaviors # supermod.TextDictionary.subclass = TextDictionary # end class TextDictionary
[docs]class TextCorpus(supermod.TextCorpus): def __init__(self, Extension=None, TextDocument=None): super(TextCorpus, self).__init__(Extension, TextDocument, )
# # XMLBehaviors # supermod.TextCorpus.subclass = TextCorpus # end class TextCorpus
[docs]class TextDocument(supermod.TextDocument): def __init__(self, id=None, name=None, length=None, file=None, Extension=None): super(TextDocument, self).__init__(id, name, length, file, Extension, )
# # XMLBehaviors # supermod.TextDocument.subclass = TextDocument # end class TextDocument
[docs]class DocumentTermMatrix(supermod.DocumentTermMatrix): def __init__(self, Extension=None, Matrix=None): super(DocumentTermMatrix, self).__init__(Extension, Matrix, )
# # XMLBehaviors # supermod.DocumentTermMatrix.subclass = DocumentTermMatrix # end class DocumentTermMatrix
[docs]class TextModelNormalization(supermod.TextModelNormalization): def __init__(self, localTermWeights='termFrequency', globalTermWeights='inverseDocumentFrequency', documentNormalization='none', Extension=None): super(TextModelNormalization, self).__init__(localTermWeights, globalTermWeights, documentNormalization, Extension, )
# # XMLBehaviors # supermod.TextModelNormalization.subclass = TextModelNormalization # end class TextModelNormalization
[docs]class TextModelSimiliarity(supermod.TextModelSimiliarity): def __init__(self, similarityType=None, Extension=None): super(TextModelSimiliarity, self).__init__(similarityType, Extension, )
# # XMLBehaviors # supermod.TextModelSimiliarity.subclass = TextModelSimiliarity # end class TextModelSimiliarity
[docs]class TimeSeriesModel(supermod.TimeSeriesModel): def __init__(self, modelName=None, functionName=None, algorithmName=None, bestFit=None, isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, LocalTransformations=None, TimeSeries=None, SpectralAnalysis=None, ARIMA=None, ExponentialSmoothing=None, SeasonalTrendDecomposition=None, StateSpaceModel=None, GARCH=None, ModelVerification=None, Extension=None): super(TimeSeriesModel, self).__init__(modelName, functionName, algorithmName, bestFit, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, LocalTransformations, TimeSeries, SpectralAnalysis, ARIMA, ExponentialSmoothing, SeasonalTrendDecomposition, StateSpaceModel, GARCH, ModelVerification, Extension, )
# # XMLBehaviors # supermod.TimeSeriesModel.subclass = TimeSeriesModel # end class TimeSeriesModel
[docs]class TimeSeries(supermod.TimeSeries): def __init__(self, usage='original', startTime=None, endTime=None, interpolationMethod='none', TimeAnchor=None, TimeValue=None): super(TimeSeries, self).__init__(usage, startTime, endTime, interpolationMethod, TimeAnchor, TimeValue, )
# # XMLBehaviors # supermod.TimeSeries.subclass = TimeSeries # end class TimeSeries
[docs]class TimeValue(supermod.TimeValue): def __init__(self, index=None, time=None, value=None, standardError=None, Timestamp=None): super(TimeValue, self).__init__(index, time, value, standardError, Timestamp, )
# # XMLBehaviors # supermod.TimeValue.subclass = TimeValue # end class TimeValue
[docs]class TimeAnchor(supermod.TimeAnchor): def __init__(self, type_=None, offset=None, stepsize=None, displayName=None, TimeCycle=None, TimeException=None): super(TimeAnchor, self).__init__(type_, offset, stepsize, displayName, TimeCycle, TimeException, )
# # XMLBehaviors # supermod.TimeAnchor.subclass = TimeAnchor # end class TimeAnchor
[docs]class TimeCycle(supermod.TimeCycle): def __init__(self, length=None, type_=None, displayName=None, Array=None): super(TimeCycle, self).__init__(length, type_, displayName, Array, )
# # XMLBehaviors # supermod.TimeCycle.subclass = TimeCycle # end class TimeCycle
[docs]class TimeException(supermod.TimeException): def __init__(self, type_=None, count=None, Array=None): super(TimeException, self).__init__(type_, count, Array, )
# # XMLBehaviors # supermod.TimeException.subclass = TimeException # end class TimeException
[docs]class ExponentialSmoothing(supermod.ExponentialSmoothing): def __init__(self, RMSE=None, transformation='none', Level=None, Trend_ExpoSmooth=None, Seasonality_ExpoSmooth=None, TimeValue=None): super(ExponentialSmoothing, self).__init__(RMSE, transformation, Level, Trend_ExpoSmooth, Seasonality_ExpoSmooth, TimeValue, )
# # XMLBehaviors # supermod.ExponentialSmoothing.subclass = ExponentialSmoothing # end class ExponentialSmoothing
[docs]class Level(supermod.Level): def __init__(self, alpha=None, initialLevelValue=None, smoothedValue=None): super(Level, self).__init__(alpha, initialLevelValue, smoothedValue, )
# # XMLBehaviors # supermod.Level.subclass = Level # end class Level
[docs]class Trend_ExpoSmooth(supermod.Trend_ExpoSmooth): def __init__(self, trend='additive', gamma=None, initialTrendValue=None, phi='1', smoothedValue=None, Array=None): super(Trend_ExpoSmooth, self).__init__(trend, gamma, initialTrendValue, phi, smoothedValue, Array, )
# # XMLBehaviors # supermod.Trend_ExpoSmooth.subclass = Trend_ExpoSmooth # end class Trend_ExpoSmooth
[docs]class Seasonality_ExpoSmooth(supermod.Seasonality_ExpoSmooth): def __init__(self, type_=None, period=None, initialSeasonalTrendValue=None, unit=None, phase=None, delta=None, Array=None): super(Seasonality_ExpoSmooth, self).__init__(type_, period, initialSeasonalTrendValue, unit, phase, delta, Array, )
# # XMLBehaviors # supermod.Seasonality_ExpoSmooth.subclass = Seasonality_ExpoSmooth # end class Seasonality_ExpoSmooth
[docs]class ARIMA(supermod.ARIMA): def __init__(self, RMSE=None, transformation='none', constantTerm='0', predictionMethod='conditionalLeastSquares', Extension=None, NonseasonalComponent=None, SeasonalComponent=None, DynamicRegressor=None, MaximumLikelihoodStat=None, OutlierEffect=None): super(ARIMA, self).__init__(RMSE, transformation, constantTerm, predictionMethod, Extension, NonseasonalComponent, SeasonalComponent, DynamicRegressor, MaximumLikelihoodStat, OutlierEffect, )
# # XMLBehaviors # supermod.ARIMA.subclass = ARIMA # end class ARIMA
[docs]class NonseasonalComponent(supermod.NonseasonalComponent): def __init__(self, p=None, d=None, q=None, Extension=None, AR=None, MA=None): super(NonseasonalComponent, self).__init__(p, d, q, Extension, AR, MA, )
# # XMLBehaviors # supermod.NonseasonalComponent.subclass = NonseasonalComponent # end class NonseasonalComponent
[docs]class SeasonalComponent(supermod.SeasonalComponent): def __init__(self, P=None, D=None, Q=None, period=None, Extension=None, AR=None, MA=None): super(SeasonalComponent, self).__init__(P, D, Q, period, Extension, AR, MA, )
# # XMLBehaviors # supermod.SeasonalComponent.subclass = SeasonalComponent # end class SeasonalComponent
[docs]class AR(supermod.AR): def __init__(self, Extension=None, Array=None): super(AR, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.AR.subclass = AR # end class AR
[docs]class MA(supermod.MA): def __init__(self, Extension=None, Coefficients=None, Residuals=None): super(MA, self).__init__(Extension, Coefficients, Residuals, )
# # XMLBehaviors # supermod.MA.subclass = MA # end class MA
[docs]class Residuals(supermod.Residuals): def __init__(self, Extension=None, Array=None): super(Residuals, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.Residuals.subclass = Residuals # end class Residuals
[docs]class DynamicRegressor(supermod.DynamicRegressor): def __init__(self, field=None, transformation='none', delay='0', futureValuesMethod='constant', targetField=None, Extension=None, Numerator=None, Denominator=None, RegressorValues=None): super(DynamicRegressor, self).__init__(field, transformation, delay, futureValuesMethod, targetField, Extension, Numerator, Denominator, RegressorValues, )
# # XMLBehaviors # supermod.DynamicRegressor.subclass = DynamicRegressor # end class DynamicRegressor
[docs]class Numerator(supermod.Numerator): def __init__(self, Extension=None, NonseasonalFactor=None, SeasonalFactor=None): super(Numerator, self).__init__(Extension, NonseasonalFactor, SeasonalFactor, )
# # XMLBehaviors # supermod.Numerator.subclass = Numerator # end class Numerator
[docs]class Denominator(supermod.Denominator): def __init__(self, Extension=None, NonseasonalFactor=None, SeasonalFactor=None): super(Denominator, self).__init__(Extension, NonseasonalFactor, SeasonalFactor, )
# # XMLBehaviors # supermod.Denominator.subclass = Denominator # end class Denominator
[docs]class SeasonalFactor(supermod.SeasonalFactor): def __init__(self, difference='0', maximumOrder=None, Extension=None, Array=None): super(SeasonalFactor, self).__init__(difference, maximumOrder, Extension, Array, )
# # XMLBehaviors # supermod.SeasonalFactor.subclass = SeasonalFactor # end class SeasonalFactor
[docs]class NonseasonalFactor(supermod.NonseasonalFactor): def __init__(self, difference='0', maximumOrder=None, Extension=None, Array=None): super(NonseasonalFactor, self).__init__(difference, maximumOrder, Extension, Array, )
# # XMLBehaviors # supermod.NonseasonalFactor.subclass = NonseasonalFactor # end class NonseasonalFactor
[docs]class RegressorValues(supermod.RegressorValues): def __init__(self, Extension=None, TimeSeries=None, TrendCoefficients=None, TransferFunctionValues=None): super(RegressorValues, self).__init__(Extension, TimeSeries, TrendCoefficients, TransferFunctionValues, )
# # XMLBehaviors # supermod.RegressorValues.subclass = RegressorValues # end class RegressorValues
[docs]class TrendCoefficients(supermod.TrendCoefficients): def __init__(self, Extension=None, REAL_SparseArray=None): super(TrendCoefficients, self).__init__(Extension, REAL_SparseArray, )
# # XMLBehaviors # supermod.TrendCoefficients.subclass = TrendCoefficients # end class TrendCoefficients
[docs]class TransferFunctionValues(supermod.TransferFunctionValues): def __init__(self, Array=None): super(TransferFunctionValues, self).__init__(Array, )
# # XMLBehaviors # supermod.TransferFunctionValues.subclass = TransferFunctionValues # end class TransferFunctionValues
[docs]class MaximumLikelihoodStat(supermod.MaximumLikelihoodStat): def __init__(self, method=None, periodDeficit='0', KalmanState=None, ThetaRecursionState=None): super(MaximumLikelihoodStat, self).__init__(method, periodDeficit, KalmanState, ThetaRecursionState, )
# # XMLBehaviors # supermod.MaximumLikelihoodStat.subclass = MaximumLikelihoodStat # end class MaximumLikelihoodStat
[docs]class KalmanState(supermod.KalmanState): def __init__(self, FinalOmega=None, FinalStateVector=None, HVector=None): super(KalmanState, self).__init__(FinalOmega, FinalStateVector, HVector, )
# # XMLBehaviors # supermod.KalmanState.subclass = KalmanState # end class KalmanState
[docs]class FinalOmega(supermod.FinalOmega): def __init__(self, Matrix=None): super(FinalOmega, self).__init__(Matrix, )
# # XMLBehaviors # supermod.FinalOmega.subclass = FinalOmega # end class FinalOmega
[docs]class FinalStateVector(supermod.FinalStateVector): def __init__(self, Array=None): super(FinalStateVector, self).__init__(Array, )
# # XMLBehaviors # supermod.FinalStateVector.subclass = FinalStateVector # end class FinalStateVector
[docs]class HVector(supermod.HVector): def __init__(self, Array=None): super(HVector, self).__init__(Array, )
# # XMLBehaviors # supermod.HVector.subclass = HVector # end class HVector
[docs]class ThetaRecursionState(supermod.ThetaRecursionState): def __init__(self, FinalNoise=None, FinalPredictedNoise=None, FinalTheta=None, FinalNu=None): super(ThetaRecursionState, self).__init__(FinalNoise, FinalPredictedNoise, FinalTheta, FinalNu, )
# # XMLBehaviors # supermod.ThetaRecursionState.subclass = ThetaRecursionState # end class ThetaRecursionState
[docs]class FinalNoise(supermod.FinalNoise): def __init__(self, Array=None): super(FinalNoise, self).__init__(Array, )
# # XMLBehaviors # supermod.FinalNoise.subclass = FinalNoise # end class FinalNoise
[docs]class FinalPredictedNoise(supermod.FinalPredictedNoise): def __init__(self, Array=None): super(FinalPredictedNoise, self).__init__(Array, )
# # XMLBehaviors # supermod.FinalPredictedNoise.subclass = FinalPredictedNoise # end class FinalPredictedNoise
[docs]class FinalTheta(supermod.FinalTheta): def __init__(self, Theta=None): super(FinalTheta, self).__init__(Theta, )
# # XMLBehaviors # supermod.FinalTheta.subclass = FinalTheta # end class FinalTheta
[docs]class Theta(supermod.Theta): def __init__(self, i=None, j=None, theta=None): super(Theta, self).__init__(i, j, theta, )
# # XMLBehaviors # supermod.Theta.subclass = Theta # end class Theta
[docs]class FinalNu(supermod.FinalNu): def __init__(self, Array=None): super(FinalNu, self).__init__(Array, )
# # XMLBehaviors # supermod.FinalNu.subclass = FinalNu # end class FinalNu
[docs]class OutlierEffect(supermod.OutlierEffect): def __init__(self, type_=None, startTime=None, magnitude=None, dampingCoefficient=None, Extension=None): super(OutlierEffect, self).__init__(type_, startTime, magnitude, dampingCoefficient, Extension, )
# # XMLBehaviors # supermod.OutlierEffect.subclass = OutlierEffect # end class OutlierEffect
[docs]class GARCH(supermod.GARCH): def __init__(self, Extension=None, ARMAPart=None, GARCHPart=None): super(GARCH, self).__init__(Extension, ARMAPart, GARCHPart, )
# # XMLBehaviors # supermod.GARCH.subclass = GARCH # end class GARCH
[docs]class ARMAPart(supermod.ARMAPart): def __init__(self, constant='0', p=None, q=None, Extension=None, AR=None, MA=None): super(ARMAPart, self).__init__(constant, p, q, Extension, AR, MA, )
# # XMLBehaviors # supermod.ARMAPart.subclass = ARMAPart # end class ARMAPart
[docs]class GARCHPart(supermod.GARCHPart): def __init__(self, constant='0', gp=None, gq=None, Extension=None, ResidualSquareCoefficients=None, VarianceCoefficients=None): super(GARCHPart, self).__init__(constant, gp, gq, Extension, ResidualSquareCoefficients, VarianceCoefficients, )
# # XMLBehaviors # supermod.GARCHPart.subclass = GARCHPart # end class GARCHPart
[docs]class ResidualSquareCoefficients(supermod.ResidualSquareCoefficients): def __init__(self, Extension=None, Residuals=None, Coefficients=None): super(ResidualSquareCoefficients, self).__init__(Extension, Residuals, Coefficients, )
# # XMLBehaviors # supermod.ResidualSquareCoefficients.subclass = ResidualSquareCoefficients # end class ResidualSquareCoefficients
[docs]class VarianceCoefficients(supermod.VarianceCoefficients): def __init__(self, Extension=None, PastVariances=None, Coefficients=None): super(VarianceCoefficients, self).__init__(Extension, PastVariances, Coefficients, )
# # XMLBehaviors # supermod.VarianceCoefficients.subclass = VarianceCoefficients # end class VarianceCoefficients
[docs]class PastVariances(supermod.PastVariances): def __init__(self, Extension=None, Array=None): super(PastVariances, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.PastVariances.subclass = PastVariances # end class PastVariances
[docs]class StateSpaceModel(supermod.StateSpaceModel): def __init__(self, variance=None, period='none', intercept='0', Extension=None, StateVector=None, TransitionMatrix=None, MeasurementMatrix=None, PsiVector=None, DynamicRegressor=None): super(StateSpaceModel, self).__init__(variance, period, intercept, Extension, StateVector, TransitionMatrix, MeasurementMatrix, PsiVector, DynamicRegressor, )
# # XMLBehaviors # supermod.StateSpaceModel.subclass = StateSpaceModel # end class StateSpaceModel
[docs]class StateVector(supermod.StateVector): def __init__(self, Extension=None, Array=None): super(StateVector, self).__init__(Extension, Array, )
# # XMLBehaviors # supermod.StateVector.subclass = StateVector # end class StateVector
[docs]class TransitionMatrix(supermod.TransitionMatrix): def __init__(self, Extension=None, Matrix=None): super(TransitionMatrix, self).__init__(Extension, Matrix, )
# # XMLBehaviors # supermod.TransitionMatrix.subclass = TransitionMatrix # end class TransitionMatrix
[docs]class MeasurementMatrix(supermod.MeasurementMatrix): def __init__(self, Extension=None, Matrix=None): super(MeasurementMatrix, self).__init__(Extension, Matrix, )
# # XMLBehaviors # supermod.MeasurementMatrix.subclass = MeasurementMatrix # end class MeasurementMatrix
[docs]class PsiVector(supermod.PsiVector): def __init__(self, targetField=None, variance=None, Extension=None, Array=None): super(PsiVector, self).__init__(targetField, variance, Extension, Array, )
# # XMLBehaviors # supermod.PsiVector.subclass = PsiVector # end class PsiVector
[docs]class SpectralAnalysis(supermod.SpectralAnalysis): def __init__(self): super(SpectralAnalysis, self).__init__()
# # XMLBehaviors # supermod.SpectralAnalysis.subclass = SpectralAnalysis # end class SpectralAnalysis
[docs]class SeasonalTrendDecomposition(supermod.SeasonalTrendDecomposition): def __init__(self): super(SeasonalTrendDecomposition, self).__init__()
# # XMLBehaviors # supermod.SeasonalTrendDecomposition.subclass = SeasonalTrendDecomposition # end class SeasonalTrendDecomposition
[docs]class TransformationDictionary(supermod.TransformationDictionary): def __init__(self, Extension=None, DefineFunction=None, DerivedField=None): super(TransformationDictionary, self).__init__(Extension, DefineFunction, DerivedField, )
# # XMLBehaviors # supermod.TransformationDictionary.subclass = TransformationDictionary # end class TransformationDictionary
[docs]class LocalTransformations(supermod.LocalTransformations): def __init__(self, Extension=None, DerivedField=None): super(LocalTransformations, self).__init__(Extension, DerivedField, )
# # XMLBehaviors # supermod.LocalTransformations.subclass = LocalTransformations # end class LocalTransformations
[docs]class DerivedField(supermod.DerivedField): """ which provides a common element for the various mappings. They can also appear at several places in the definition of specific models such as neural network or Naïve Bayes models Parameters ---------- name: name of the element optype: The attribute optype is needed in order to eliminate cases where the resulting type is not known dataType: specifies the data type for the output column FieldRef: Field references are simply pass-throughs to fields previously defined in the DataDictionary, a DerivedField, or a result field Constant: used in expressions which have multiple arguments. The actual value of a constant is given by the content of the element NormContinuous: defines how to normalize an input field by piecewise linear interpolation NormDiscrete: refer to a certain input field define a fan-out function which maps a single input field to a set of normalized fields Discretize: Takes the input field as input and maps values less than 0 to negative and other values to positive MapValues: element can be used to create missing value indicators for categorical variables TextIndex: TextIndex expression to extract frequency information from the text input field, for a given term. The TextIndex element fully configures how the text input should be indexed, including case sensitivity, normalization and other settings Aggregate: summarize or collect groups of values, e.g., compute average Lag: defined as the value of the given input field a fixed number of records prior to the current one,If the desired value is not present, for a given record, the lag will be set to missing """ def __init__(self, name=None, displayName=None, optype=None, dataType=None, datasetName=None, trainingBackend=None, architectureName=None, Extension=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex=None, Aggregate=None, Lag=None, Value=None): super(DerivedField, self).__init__(name, displayName, optype, dataType, datasetName, trainingBackend, architectureName, Extension, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex, Aggregate, Lag, Value, )
# # XMLBehaviors # supermod.DerivedField.subclass = DerivedField # end class DerivedField
[docs]class Constant(supermod.Constant): """ Used in expressions which have multiple arguments. The actual value of a constant is given by the content of the element Parameters: ---------- dataType: Describe the dataType of the ParametersField valueOf_: Value of the given Constant """ def __init__(self, dataType=None, valueOf_=None): super(Constant, self).__init__(dataType, valueOf_, )
# # XMLBehaviors # supermod.Constant.subclass = Constant # end class Constant
[docs]class FieldRef(supermod.FieldRef): """ Field references are simply pass-throughs to fields previously defined in the DataDictionary, a DerivedField, or a result field Parameters ---------- field: Name of the field mapMissingTo: may be used to map a missing result to the value specified by the attribute. If the attribute is not present, the result remains missing """ def __init__(self, field=None, mapMissingTo=None, Extension=None): super(FieldRef, self).__init__(field, mapMissingTo, Extension, )
# # XMLBehaviors # supermod.FieldRef.subclass = FieldRef # end class FieldRef
[docs]class NormContinuous(supermod.NormContinuous): """ Used to implement simple normalization functions such as the z-score transformation (X - m ) / s, where m is the mean value and s is the standard deviation. Parameters ---------- mapMissingTo : may be used to map a missing result to the value specified by the attribute. If the attribute is not present, the result remains missing outliers : it shows the records which are out of trend LinearNorm : defines a sequence of points for a stepwise linear interpolation function,LinearNorm must be strictly sorted by ascending value of orig """ def __init__(self, mapMissingTo=None, field=None, outliers='asIs', Extension=None, LinearNorm=None): super(NormContinuous, self).__init__(mapMissingTo, field, outliers, Extension, LinearNorm, )
# # XMLBehaviors # supermod.NormContinuous.subclass = NormContinuous # end class NormContinuous
[docs]class LinearNorm(supermod.LinearNorm): def __init__(self, orig=None, norm=None, Extension=None): super(LinearNorm, self).__init__(orig, norm, Extension, )
# # XMLBehaviors # supermod.LinearNorm.subclass = LinearNorm # end class LinearNorm
[docs]class NormDiscrete(supermod.NormDiscrete): """ Refer to a certain input field define a fan-out function which maps a single input field to a set of normalized fields. Parameters ---------- mapMissingTo : may be used to map a missing result to the value specified by the attribute. If the attribute is not present, the result remains missing """ def __init__(self, field=None, value=None, mapMissingTo=None, Extension=None): super(NormDiscrete, self).__init__(field, value, mapMissingTo, Extension, )
# # XMLBehaviors # supermod.NormDiscrete.subclass = NormDiscrete # end class NormDiscrete
[docs]class Discretize(supermod.Discretize): """ Discretization of numerical input fields is a mapping from continuous to discrete values using intervals Parameters ---------- field: defines the name of the input field mapMissingTo: may be used to map a missing result to the value specified by the attribute. If the attribute is not present, the result remains missing DiscretizeBin: define a set of mappings from an intervali to a binValue """ def __init__(self, field=None, mapMissingTo=None, defaultValue=None, dataType=None, Extension=None, DiscretizeBin=None): super(Discretize, self).__init__(field, mapMissingTo, defaultValue, dataType, Extension, DiscretizeBin, )
# # XMLBehaviors # supermod.Discretize.subclass = Discretize # end class Discretize
[docs]class DiscretizeBin(supermod.DiscretizeBin): def __init__(self, binValue=None, Extension=None, Interval=None): super(DiscretizeBin, self).__init__(binValue, Extension, Interval, )
# # XMLBehaviors # supermod.DiscretizeBin.subclass = DiscretizeBin # end class DiscretizeBin
[docs]class MapValues(supermod.MapValues): """ element can be used to create missing value indicators for categorical variables Parameters ---------- mapMissingTo : string may be used to map a missing result to the value specified by the attribute. If the attribute is not present, the result remains missing InlineTable : used in mapping values """ def __init__(self, mapMissingTo=None, defaultValue=None, outputColumn=None, dataType=None, Extension=None, FieldColumnPair=None, TableLocator=None, InlineTable=None): super(MapValues, self).__init__(mapMissingTo, defaultValue, outputColumn, dataType, Extension, FieldColumnPair, TableLocator, InlineTable, )
# # XMLBehaviors # supermod.MapValues.subclass = MapValues # end class MapValues
[docs]class FieldColumnPair(supermod.FieldColumnPair): def __init__(self, field=None, column=None, Extension=None): super(FieldColumnPair, self).__init__(field, column, Extension, )
# # XMLBehaviors # supermod.FieldColumnPair.subclass = FieldColumnPair # end class FieldColumnPair
[docs]class TextIndex(supermod.TextIndex): """ TextIndex expression to extract frequency information from the text input field, for a given term. The TextIndex element fully configures how the text input should be indexed, including case sensitivity, normalization and other settings, Parameters ---------- FieldRef : Field references are simply pass-throughs to fields previously defined in the DataDictionary, a DerivedField, or a result field Constant : used in expressions which have multiple arguments. The actual value of a constant is given by the content of the element NormContinuous : defines how to normalize an input field by piecewise linear interpolation NormDiscrete : refer to a certain input field define a fan-out function which maps a single input field to a set of normalized fields Discretize : Discretization of numerical input fields is a mapping from continuous to discrete values using intervals MapValues : element can be used to create missing value indicators for categorical variables Aggregate : summarize or collect groups of values, e.g., compute average Lag : defined as the value of the given input field a fixed number of records prior to the current one,If the desired value is not present, for a given record, the lag will be set to missing """ def __init__(self, textField=None, localTermWeights='termFrequency', isCaseSensitive=False, maxLevenshteinDistance=0, countHits='allHits', wordSeparatorCharacterRE='\\s', tokenize=True, Extension=None, TextIndexNormalization=None, Apply=None, FieldRef=None, Constant=None, NormContinuous=None, NormDiscrete=None, Discretize=None, MapValues=None, TextIndex_member=None, Aggregate=None, Lag=None): super(TextIndex, self).__init__(textField, localTermWeights, isCaseSensitive, maxLevenshteinDistance, countHits, wordSeparatorCharacterRE, tokenize, Extension, TextIndexNormalization, Apply, FieldRef, Constant, NormContinuous, NormDiscrete, Discretize, MapValues, TextIndex_member, Aggregate, Lag, )
# # XMLBehaviors # supermod.TextIndex.subclass = TextIndex # end class TextIndex
[docs]class TextIndexNormalization(supermod.TextIndexNormalization): def __init__(self, inField='string', outField='stem', regexField='regex', recursive=False, isCaseSensitive=None, maxLevenshteinDistance=None, wordSeparatorCharacterRE=None, tokenize=None, Extension=None, TableLocator=None, InlineTable=None): super(TextIndexNormalization, self).__init__(inField, outField, regexField, recursive, isCaseSensitive, maxLevenshteinDistance, wordSeparatorCharacterRE, tokenize, Extension, TableLocator, InlineTable, )
# # XMLBehaviors # supermod.TextIndexNormalization.subclass = TextIndexNormalization # end class TextIndexNormalization
[docs]class Aggregate(supermod.Aggregate): """ summarize or collect groups of values, e.g. compute average Parameters ---------- field : string Column names """ def __init__(self, field=None, function=None, groupField=None, sqlWhere=None, Extension=None): super(Aggregate, self).__init__(field, function, groupField, sqlWhere, Extension, )
# # XMLBehaviors # supermod.Aggregate.subclass = Aggregate # end class Aggregate
[docs]class Lag(supermod.Lag): """ defined as the value of the given input field a fixed number of records prior to the current one,If the desired value is not present, for a given record, the lag will be set to missing """ def __init__(self, field=None, n=1, Extension=None, BlockIndicator=None): super(Lag, self).__init__(field, n, Extension, BlockIndicator, )
# # XMLBehaviors # supermod.Lag.subclass = Lag # end class Lag
[docs]class BlockIndicator(supermod.BlockIndicator): def __init__(self, field=None): super(BlockIndicator, self).__init__(field, )
# # XMLBehaviors # supermod.BlockIndicator.subclass = BlockIndicator # end class BlockIndicator
[docs]class TreeModel(supermod.TreeModel): """ TreeModel in PMML allows for defining either a classification or prediction structure. Each Node holds a logical predicate expression that defines the rule for choosing the Node or any of the branching Nodes. Parameters ---------- modelName : element identifies the model with a unique name in the context of the PMML file functionName : Stores what type of problems it is ex classification or regression algorithmName : Stores algorithm name used in the model missingValueStrategy : defines a strategy for dealing with missing values missingValuePenalty : defines a penalty applied to confidence calculation when missing value handling is performed noTrueChildStrategy : defines what to do in situations where scoring cannot reach a leaf node splitCharacteristic : indicates whether non-leaf Nodes in the tree model have exactly two children, or an unrestricted number of children isScorable : indicates whether the model is valid for scoring MiningSchema : Stores the MiningField in building PMML Node : this element is an encapsulation for either defining a split or a leaf in a tree model. Every Node contains a predicate that identifies a rule for choosing itself or any of its siblings """ def __init__(self, modelName=None, functionName=None, algorithmName=None, missingValueStrategy='none', missingValuePenalty='1.0', noTrueChildStrategy='returnNullPrediction', splitCharacteristic='multiSplit', isScorable=True, MiningSchema=None, Output=None, ModelStats=None, ModelExplanation=None, Targets=None, LocalTransformations=None, Node=None, ModelVerification=None, Extension=None): super(TreeModel, self).__init__(modelName, functionName, algorithmName, missingValueStrategy, missingValuePenalty, noTrueChildStrategy, splitCharacteristic, isScorable, MiningSchema, Output, ModelStats, ModelExplanation, Targets, LocalTransformations, Node, ModelVerification, Extension, )
# # XMLBehaviors # supermod.TreeModel.subclass = TreeModel # end class TreeModel
[docs]class Node(supermod.Node): """ Node is an encapsulation for either defining a split or a leaf in a tree model. Every Node contains a predicate that identifies a rule for choosing itself or any of its siblings Parameters ---------- id : The value of id serves as a unique identifier for any given Node within the tree model score : the predicted value for a record that chooses the Node recordCount : allow to determine the relative size of given values in a ScoreDistribution as well as the relative size of a Node when compared to the parent Node defaultChild : Only applicable when missingValueStrategy is set to defaultChild in the TreeModel element SimplePredicate : defines a rule in the form of a simple boolean expression. The rule consists of field, operator (booleanOperator) for binary comparison, and value CompoundPredicate : an encapsulating element for combining two or more elements as defined at the entity PREDICATE SimpleSetPredicate : checks whether a field value is element of a set. The set of values is specified by the array True_ : a predicate element that identifies the boolean constant TRUE False_ : a predicate element that identifies the boolean constant False Partition : Optional element to provide distribution information for all records that belong to the respective Node ScoreDistribution : an element of Node to represent segments of the score that a Node predicts in a classification model. If the Node holds an enumeration, each entry of the enumeration is stored in one ScoreDistribution element """ def __init__(self, id=None, score=None, recordCount=None, defaultChild=None, SimplePredicate=None, CompoundPredicate=None, SimpleSetPredicate=None, True_=None, False_=None, Partition=None, ScoreDistribution=None, Node_member=None, Extension=None, Regression=None, DecisionTree=None): super(Node, self).__init__(id, score, recordCount, defaultChild, SimplePredicate, CompoundPredicate, SimpleSetPredicate, True_, False_, Partition, ScoreDistribution, Node_member, Extension, Regression, DecisionTree, )
# # XMLBehaviors # supermod.Node.subclass = Node # end class Node
[docs]class SimplePredicate(supermod.SimplePredicate): """ defines a rule in the form of a simple boolean expression. The rule consists of field, operator (booleanOperator) for binary comparison, and value Parameters ---------- field : This attribute of the SimplePredicate element is the name attribute of a MiningField or a DerivedField from TransformationDictionary or LocalTransformations operator : this attribute of SimplePredicate is one of the six pre-defined comparison operators Operator Math Symbol equal = notEqual ≠ lessThan < lessOrEqual ≤ greaterThan > greaterOrEqual ≥ value : This attribute of SimplePredicate element is the information to evaluate / compare against """ def __init__(self, field=None, operator=None, value=None, Extension=None): super(SimplePredicate, self).__init__(field, operator, value, Extension, )
# # XMLBehaviors # supermod.SimplePredicate.subclass = SimplePredicate # end class SimplePredicate
[docs]class CompoundPredicate(supermod.CompoundPredicate): """ an encapsulating element for combining two or more elements as defined at the entity PREDICATE Parameters: ---------- booleanOperator: The operators and, or and xor are associative binary operators, having their usual semantics. The order of evaluation is irrelevant for all the predicates within one CompoundPredicate SimplePredicate: defines a rule in the form of a simple boolean expression. The rule consists of field, operator (booleanOperator) for binary comparison, and value SimpleSetPredicate: checks whether a field value is element of a set. The set of values is specified by the array True_: a predicate element that identifies the boolean constant TRUE False_: a predicate element that identifies the boolean constant False """ def __init__(self, booleanOperator=None, Extension=None, SimplePredicate=None, CompoundPredicate_member=None, SimpleSetPredicate=None, True_=None, False_=None): super(CompoundPredicate, self).__init__(booleanOperator, Extension, SimplePredicate, CompoundPredicate_member, SimpleSetPredicate, True_, False_, )
# # XMLBehaviors # supermod.CompoundPredicate.subclass = CompoundPredicate # end class CompoundPredicate
[docs]class SimpleSetPredicate(supermod.SimpleSetPredicate): """ checks whether a field value is element of a set. The set of values is specified by the array Parameters ---------- booleanOperator : can take one of following boolean operators: isIn, and isNotIn Array : The set of values is specified by the array in the content """ def __init__(self, field=None, booleanOperator=None, Extension=None, Array=None): super(SimpleSetPredicate, self).__init__(field, booleanOperator, Extension, Array, )
# # XMLBehaviors # supermod.SimpleSetPredicate.subclass = SimpleSetPredicate # end class SimpleSetPredicate
[docs]class True_(supermod.True_): def __init__(self, Extension=None): super(True_, self).__init__(Extension, )
# # XMLBehaviors # supermod.True_.subclass = True_ # end class True_
[docs]class False_(supermod.False_): def __init__(self, Extension=None): super(False_, self).__init__(Extension, )
# # XMLBehaviors # supermod.False_.subclass = False_ # end class False_
[docs]class ScoreDistribution(supermod.ScoreDistribution): """ an element of Node to represent segments of the score that a Node predicts in a classification model. If the Node holds an enumeration, each entry of the enumeration is stored in one ScoreDistribution element Parameters ---------- value : This attribute of ScoreDistribution is the label in a classification model recordCount : This attribute of ScoreDistribution is the size (in number of records) associated with the value attribute confidence : This optional attribute of ScoreDistribution assigns a confidence to a given prediction class for this tree node , Confidences are similar to probabilities but more relaxed The confidences may not necessarily sum to 1 across the different classes, like probabilities would. Confidences should normally lie in the range 0.0 to 1.0 though probability : This optional attribute assigns a predicted probability for the given value within the node """ def __init__(self, value=None, recordCount=None, confidence=None, probability=None, Extension=None): super(ScoreDistribution, self).__init__(value, recordCount, confidence, probability, Extension, )
# # XMLBehaviors # supermod.ScoreDistribution.subclass = ScoreDistribution # end class ScoreDistribution
[docs]def get_root_tag(node): tag = supermod.Tag_pattern_.match(node.tag).groups()[-1] rootClass = None rootClass = supermod.GDSClassesMapping.get(tag) if rootClass is None and hasattr(supermod, tag): rootClass = getattr(supermod, tag) return tag, rootClass
[docs]def parseSub(inFilename, silence=False): parser = None doc = parsexml_(inFilename, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'AssociationModel' rootClass = supermod.AssociationModel rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None if not silence: sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export( sys.stdout, 0, name_=rootTag, namespacedef_='', pretty_print=True) return rootObj
[docs]def parseEtree(inFilename, silence=False): parser = None doc = parsexml_(inFilename, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'AssociationModel' rootClass = supermod.AssociationModel rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None mapping = {} rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) if not silence: content = etree_.tostring( rootElement, pretty_print=True, xml_declaration=True, encoding="utf-8") sys.stdout.write(content) sys.stdout.write('\n') return rootObj, rootElement, mapping, reverse_mapping
[docs]def parseString(inString, silence=False): from StringIO import StringIO parser = None doc = parsexml_(StringIO(inString), parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'AssociationModel' rootClass = supermod.AssociationModel rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None if not silence: sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export( sys.stdout, 0, name_=rootTag, namespacedef_='') return rootObj
[docs]def parseLiteral(inFilename, silence=False): parser = None doc = parsexml_(inFilename, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'AssociationModel' rootClass = supermod.AssociationModel rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None if not silence: sys.stdout.write('#from nyoka.pmml.PMML43ExtSuper import *\n\n') sys.stdout.write('import nyoka.pmml.PMML43ExtSuper as model_\n\n') sys.stdout.write('rootObj = model_.rootClass(\n') rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) sys.stdout.write(')\n') return rootObj
USAGE_TEXT = """ Usage: python ???.py <infilename> """
[docs]def usage(): print(USAGE_TEXT) sys.exit(1)
[docs]def main(): args = sys.argv[1:] if len(args) != 1: usage() infilename = args[0] parse(infilename)
if __name__ == '__main__': #import pdb; pdb.set_trace() main()
[docs]def parse(inFileName, silence=False): orig_init() result = parseSub(inFileName, silence) new_init() return result
[docs]def new_init(): def LayerWeights_init(self, weightsShape=None, weightsFlattenAxis=None, content=None, floatType="float32", floatsPerLine=12, src=None, Extension=None, mixedclass_=None): self.original_tagname_ = None self.weightsShape = supermod._cast(None, weightsShape) self.weightsFlattenAxis = supermod._cast(None, weightsFlattenAxis) self.src = supermod._cast(None, src) if Extension is None: self.Extension = [] else: self.Extension = Extension if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ validFloatTypes = ["float6", "float7", "float8", "float16", "float32", "float64"] if floatType not in validFloatTypes: floatType = "float32" from Base64 import FloatBase64 base64string = "\t\t\t\t" + "data:" + floatType + ";base64," + FloatBase64.from_floatArray(content, floatsPerLine) base64string = base64string.replace("\n", "\n\t\t\t\t") self.content_ = [supermod.MixedContainer(1, 2, "", base64string)] self.valueOf_ = base64string def LayerBias_init(self, biasShape=None, biasFlattenAxis=None, content=None, floatType="float32", floatsPerLine=12, src=None, Extension=None, mixedclass_=None): self.original_tagname_ = None self.biasShape = supermod._cast(None, biasShape) self.biasFlattenAxis = supermod._cast(None, biasFlattenAxis) self.src = supermod._cast(None, src) if Extension is None: self.Extension = [] else: self.Extension = Extension if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ validFloatTypes = ["float6", "float7", "float8", "float16", "float32", "float64"] if floatType not in validFloatTypes: floatType = "float32" from Base64 import FloatBase64 base64string = "\t\t\t\t" + "data:" + floatType + ";base64," + FloatBase64.from_floatArray(content, floatsPerLine) base64string = base64string.replace("\n", "\n\t\t\t\t") self.content_ = [supermod.MixedContainer(1, 2, "", base64string)] self.valueOf_ = base64string def ArrayType_init(self, content=None, n=None, type_=None, mixedclass_=None): self.original_tagname_ = None self.n = supermod._cast(None, n) self.type_ = supermod._cast(None, type_) if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ self.content_ = [supermod.MixedContainer(1, 2, "", str(content))] self.valueOf_ = str(content) def Annotation_init(self, content=None, Extension=None, mixedclass_=None): self.original_tagname_ = None if Extension is None: self.Extension = [] else: self.Extension = Extension if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ self.content_ = [supermod.MixedContainer(1, 2, "", str(content))] self.valueOf_ = str(content) def Timestamp_init(self, content=None, Extension=None, mixedclass_=None): self.original_tagname_ = None if Extension is None: self.Extension = [] else: self.Extension = Extension if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ self.content_ = [supermod.MixedContainer(1, 2, "", str(content))] self.valueOf_ = str(content) def PMML_init(self, version='4.3', Header=None, script=None, MiningBuildTask=None, DataDictionary=None, TransformationDictionary=None, AssociationModel=None, BayesianNetworkModel=None, BaselineModel=None, ClusteringModel=None, DeepNetwork=None, GaussianProcessModel=None, GeneralRegressionModel=None, MiningModel=None, NaiveBayesModel=None, NearestNeighborModel=None, NeuralNetwork=None, RegressionModel=None, RuleSetModel=None, SequenceModel=None, Scorecard=None, SupportVectorMachineModel=None, TextModel=None, TimeSeriesModel=None, TreeModel=None, Extension=None): self.original_tagname_ = None self.version = supermod._cast(None, version) self.Header = Header if script is None: self.script = [] else: self.script = script self.MiningBuildTask = MiningBuildTask self.DataDictionary = DataDictionary if AssociationModel is None: self.AssociationModel = [] else: self.AssociationModel = AssociationModel if BayesianNetworkModel is None: self.BayesianNetworkModel = [] else: self.BayesianNetworkModel = BayesianNetworkModel if BaselineModel is None: self.BaselineModel = [] else: self.BaselineModel = BaselineModel if ClusteringModel is None: self.ClusteringModel = [] else: self.ClusteringModel = ClusteringModel if DeepNetwork is None: self.DeepNetwork = [] else: self.DeepNetwork = DeepNetwork if GaussianProcessModel is None: self.GaussianProcessModel = [] else: self.GaussianProcessModel = GaussianProcessModel if GeneralRegressionModel is None: self.GeneralRegressionModel = [] else: self.GeneralRegressionModel = GeneralRegressionModel if MiningModel is None: self.MiningModel = [] else: self.MiningModel = MiningModel if NaiveBayesModel is None: self.NaiveBayesModel = [] else: self.NaiveBayesModel = NaiveBayesModel if NearestNeighborModel is None: self.NearestNeighborModel = [] else: self.NearestNeighborModel = NearestNeighborModel if NeuralNetwork is None: self.NeuralNetwork = [] else: self.NeuralNetwork = NeuralNetwork if RegressionModel is None: self.RegressionModel = [] else: self.RegressionModel = RegressionModel if RuleSetModel is None: self.RuleSetModel = [] else: self.RuleSetModel = RuleSetModel if SequenceModel is None: self.SequenceModel = [] else: self.SequenceModel = SequenceModel if Scorecard is None: self.Scorecard = [] else: self.Scorecard = Scorecard if SupportVectorMachineModel is None: self.SupportVectorMachineModel = [] else: self.SupportVectorMachineModel = SupportVectorMachineModel if TextModel is None: self.TextModel = [] else: self.TextModel = TextModel if TimeSeriesModel is None: self.TimeSeriesModel = [] else: self.TimeSeriesModel = TimeSeriesModel if TransformationDictionary is None: self.TransformationDictionary = [] else: self.TransformationDictionary = TransformationDictionary if TreeModel is None: self.TreeModel = [] else: self.TreeModel = TreeModel if Extension is None: self.Extension = [] else: self.Extension = Extension def script_init(self, content=None, for_=None, class_=None, Extension=None): self.original_tagname_ = None self.for_ = supermod._cast(None, for_) self.class_ = supermod._cast(None, class_) if Extension is None: self.Extension = [] else: self.Extension = Extension self.anyAttributes_ = {} self.mixedclass_ = supermod.MixedContainer self.content_ = [supermod.MixedContainer(1, 2, "", str(content))] self.valueOf_ = str(content) LayerWeights.__init__ = LayerWeights_init LayerBias.__init__ = LayerBias_init ArrayType.__init__ = ArrayType_init Annotation.__init__ = Annotation_init Timestamp.__init__ = Timestamp_init PMML.__init__ = PMML_init script.__init__ = script_init
[docs]def orig_init(): def LayerWeights_init(self, weightsShape=None, weightsFlattenAxis=None, src=None, Extension=None, valueOf_=None, mixedclass_=None, content_=None): self.original_tagname_ = None self.weightsShape = supermod._cast(None, weightsShape) self.weightsFlattenAxis = supermod._cast(None, weightsFlattenAxis) self.src = supermod._cast(None, src) if Extension is None: self.Extension = [] else: self.Extension = Extension self.valueOf_ = valueOf_ if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ def LayerBias_init(self, biasShape=None, biasFlattenAxis=None, src=None, Extension=None, valueOf_=None, mixedclass_=None, content_=None): self.original_tagname_ = None self.biasShape = supermod._cast(None, biasShape) self.biasFlattenAxis = supermod._cast(None, biasFlattenAxis) self.src = supermod._cast(None, src) if Extension is None: self.Extension = [] else: self.Extension = Extension self.valueOf_ = valueOf_ if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ def ArrayType_init(self, n=None, type_=None, valueOf_=None, mixedclass_=None, content_=None): self.original_tagname_ = None self.n = supermod._cast(None, n) self.type_ = supermod._cast(None, type_) self.valueOf_ = valueOf_ if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ def Annotation_init(self, Extension=None, valueOf_=None, mixedclass_=None, content_=None): self.original_tagname_ = None if Extension is None: self.Extension = [] else: self.Extension = Extension self.valueOf_ = valueOf_ if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ def Timestamp_init(self, Extension=None, valueOf_=None, mixedclass_=None, content_=None): self.original_tagname_ = None if Extension is None: self.Extension = [] else: self.Extension = Extension self.valueOf_ = valueOf_ if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ def PMML_init(self, version=None, Header=None, script=None, MiningBuildTask=None, DataDictionary=None, TransformationDictionary=None, AssociationModel=None, BayesianNetworkModel=None, BaselineModel=None, ClusteringModel=None, DeepNetwork=None, GaussianProcessModel=None, GeneralRegressionModel=None, MiningModel=None, NaiveBayesModel=None, NearestNeighborModel=None, NeuralNetwork=None, RegressionModel=None, RuleSetModel=None, SequenceModel=None, Scorecard=None, SupportVectorMachineModel=None, TextModel=None, TimeSeriesModel=None, TreeModel=None, Extension=None): self.original_tagname_ = None self.version = supermod._cast(None, version) self.Header = Header if script is None: self.script = [] else: self.script = script self.MiningBuildTask = MiningBuildTask self.DataDictionary = DataDictionary self.TransformationDictionary = TransformationDictionary if AssociationModel is None: self.AssociationModel = [] else: self.AssociationModel = AssociationModel if BayesianNetworkModel is None: self.BayesianNetworkModel = [] else: self.BayesianNetworkModel = BayesianNetworkModel if BaselineModel is None: self.BaselineModel = [] else: self.BaselineModel = BaselineModel if ClusteringModel is None: self.ClusteringModel = [] else: self.ClusteringModel = ClusteringModel if DeepNetwork is None: self.DeepNetwork = [] else: self.DeepNetwork = DeepNetwork if GaussianProcessModel is None: self.GaussianProcessModel = [] else: self.GaussianProcessModel = GaussianProcessModel if GeneralRegressionModel is None: self.GeneralRegressionModel = [] else: self.GeneralRegressionModel = GeneralRegressionModel if MiningModel is None: self.MiningModel = [] else: self.MiningModel = MiningModel if NaiveBayesModel is None: self.NaiveBayesModel = [] else: self.NaiveBayesModel = NaiveBayesModel if NearestNeighborModel is None: self.NearestNeighborModel = [] else: self.NearestNeighborModel = NearestNeighborModel if NeuralNetwork is None: self.NeuralNetwork = [] else: self.NeuralNetwork = NeuralNetwork if RegressionModel is None: self.RegressionModel = [] else: self.RegressionModel = RegressionModel if RuleSetModel is None: self.RuleSetModel = [] else: self.RuleSetModel = RuleSetModel if SequenceModel is None: self.SequenceModel = [] else: self.SequenceModel = SequenceModel if Scorecard is None: self.Scorecard = [] else: self.Scorecard = Scorecard if SupportVectorMachineModel is None: self.SupportVectorMachineModel = [] else: self.SupportVectorMachineModel = SupportVectorMachineModel if TextModel is None: self.TextModel = [] else: self.TextModel = TextModel if TimeSeriesModel is None: self.TimeSeriesModel = [] else: self.TimeSeriesModel = TimeSeriesModel if TransformationDictionary is None: self.TransformationDictionary = [] else: self.TransformationDictionary = TransformationDictionary if TreeModel is None: self.TreeModel = [] else: self.TreeModel = TreeModel if Extension is None: self.Extension = [] else: self.Extension = Extension def script_init(self, for_=None, class_=None, Extension=None, valueOf_=None, mixedclass_=None, content_=None): self.original_tagname_ = None self.for_ = supermod._cast(None, for_) self.class_ = supermod._cast(None, class_) if Extension is None: self.Extension = [] else: self.Extension = Extension self.valueOf_ = valueOf_ self.anyAttributes_ = {} if mixedclass_ is None: self.mixedclass_ = supermod.MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ self.valueOf_ = valueOf_ LayerWeights.__init__ = LayerWeights_init LayerBias.__init__ = LayerBias_init ArrayType.__init__ = ArrayType_init Annotation.__init__ = Annotation_init Timestamp.__init__ = Timestamp_init PMML.__init__ = PMML_init script.__init__ = script_init
new_init()
[docs]def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write('\t')