Merge commit for internal changes
wbakst committed Aug 10, 2020
2 parents 2f2cf33 + 034aaaa commit 2aeb367
Showing 42 changed files with 3,848 additions and 1,201 deletions.
1 change: 1 addition & 0 deletions build_docs.py
@@ -64,6 +64,7 @@ def main(_):
'tfl': ['python'],
'tfl.aggregation_layer': ['Aggregation'],
'tfl.categorical_calibration_layer': ['CategoricalCalibration'],
+ 'tfl.kronecker_factored_lattice_layer': ['KroneckerFactoredLattice'],
'tfl.lattice_layer': ['Lattice'],
'tfl.linear_layer': ['Linear'],
'tfl.pwl_calibration_layer': ['PWLCalibration'],
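Aside from the diff: the registry entry above exposes the new `tfl.layers.KroneckerFactoredLattice` layer in the generated docs. A minimal sketch of constructing that layer — the argument names and values here are assumptions about its public interface, not code from this commit:

```python
import numpy as np
import tensorflow_lattice as tfl

# All dimensions of a KroneckerFactoredLattice share one lattice size,
# so `lattice_sizes` is a single int rather than a per-feature list.
kfl = tfl.layers.KroneckerFactoredLattice(
    lattice_sizes=2,
    num_terms=2,  # rank of the Kronecker factorization (assumed parameter)
    monotonicities=['increasing', 'increasing', 'none', 'none'])

# Inputs are expected in [0, lattice_sizes - 1], one column per dimension.
x = np.random.uniform(0.0, 1.0, size=(8, 4)).astype(np.float32)
print(kfl(x).shape)  # -> (8, 1)
```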
2 changes: 1 addition & 1 deletion docs/tutorials/aggregate_function_models.ipynb
@@ -476,7 +476,7 @@
"source": [
"## Aggregate Function Model\n",
"\n",
"To construct a TFL premade model, first construct a model configuration from [tfl.configs](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs). An aggregate function model is constructed using the [tfl.configs.AggregateFunctionConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/AggregateFunctionConfig). It applies piecewise-linear and categorical calibration, followed by a lattice model on each dimension of the ragged input. It then applies an aggregation layer over the output for each dimension. This is then followed by an optional output piecewise-lienar calibration."
"To construct a TFL premade model, first construct a model configuration from [tfl.configs](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs). An aggregate function model is constructed using the [tfl.configs.AggregateFunctionConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/AggregateFunctionConfig). It applies piecewise-linear and categorical calibration, followed by a lattice model on each dimension of the ragged input. It then applies an aggregation layer over the output for each dimension. This is then followed by an optional output piecewise-linear calibration."
]
},
{
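Aside from the diff: the cell above describes `tfl.configs.AggregateFunctionConfig`. A hedged sketch of the construction it describes — the feature name, keypoints, and `middle_dimension` value are illustrative placeholders, not taken from the tutorial:

```python
import numpy as np
import tensorflow as tf
import tensorflow_lattice as tfl

# Placeholder feature config: the name and keypoints are invented for
# illustration; real configs are derived from the dataset as in the tutorial.
model_config = tfl.configs.AggregateFunctionConfig(
    feature_configs=[
        tfl.configs.FeatureConfig(
            name='example_feature',
            monotonicity='increasing',
            pwl_calibration_input_keypoints=np.linspace(0.0, 1.0, num=5)),
    ],
    middle_dimension=4)  # set output_calibration=True for the optional
                         # output piecewise-linear calibration
model = tfl.premade.AggregateFunction(model_config)
model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.01))
# Each input is ragged: a variable-length list of items per example.
```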
87 changes: 71 additions & 16 deletions docs/tutorials/canned_estimators.ipynb
@@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {},
@@ -101,7 +101,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -125,7 +125,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"cellView": "both",
"colab": {},
@@ -136,6 +136,7 @@
"source": [
"import tensorflow as tf\n",
"\n",
"import copy\n",
"import logging\n",
"import numpy as np\n",
"import pandas as pd\n",
@@ -157,7 +158,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"cellView": "both",
"colab": {},
@@ -190,7 +191,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"cellView": "both",
"colab": {},
@@ -219,7 +220,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -285,7 +286,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -341,7 +342,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -450,7 +451,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -499,7 +500,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -559,7 +560,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -568,7 +569,7 @@
"outputs": [],
"source": [
"# This is random lattice ensemble model with separate calibration:\n",
"# model output is the average output of separatly calibrated lattices.\n",
"# model output is the average output of separately calibrated lattices.\n",
"model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
" feature_configs=feature_configs,\n",
" num_lattices=5,\n",
@@ -589,6 +590,60 @@
"tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "7uyO8s97FGJM"
+ },
+ "source": [
+ "### RTL Layer Random Lattice Ensemble\n",
+ "\n",
+ "The following model config uses a `tfl.layers.RTL` layer that uses a random subset of features for each lattice. We note that `tfl.layers.RTL` only supports monotonicity constraints and must have the same lattice size for all features and no per-feature regularization. Note that using a `tfl.layers.RTL` layer lets you scale to much larger ensembles than using separate `tfl.layers.Lattice` instances."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "8v7dKg-FF7iz"
+ },
+ "outputs": [],
+ "source": [
+ "# Make sure our feature configs have the same lattice size, no per-feature\n",
+ "# regularization, and only monotonicity constraints.\n",
+ "rtl_layer_feature_configs = copy.deepcopy(feature_configs)\n",
+ "for feature_config in rtl_layer_feature_configs:\n",
+ " feature_config.lattice_size = 2\n",
+ " feature_config.unimodality = 'none'\n",
+ " feature_config.reflects_trust_in = None\n",
+ " feature_config.dominates = None\n",
+ " feature_config.regularizer_configs = None\n",
+ "# This is RTL layer ensemble model with separate calibration:\n",
+ "# model output is the average output of separately calibrated lattices.\n",
+ "model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
+ " lattices='rtl_layer',\n",
+ " feature_configs=rtl_layer_feature_configs,\n",
+ " num_lattices=5,\n",
+ " lattice_rank=3)\n",
+ "# A CannedClassifier is constructed from the given model config.\n",
+ "estimator = tfl.estimators.CannedClassifier(\n",
+ " feature_columns=feature_columns,\n",
+ " model_config=model_config,\n",
+ " feature_analysis_input_fn=feature_analysis_input_fn,\n",
+ " optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),\n",
+ " config=tf.estimator.RunConfig(tf_random_seed=42))\n",
+ "estimator.train(input_fn=train_input_fn)\n",
+ "results = estimator.evaluate(input_fn=test_input_fn)\n",
+ "print('Random ensemble test AUC: {}'.format(results['auc']))\n",
+ "saved_model_path = estimator.export_saved_model(estimator.model_dir,\n",
+ " serving_input_fn)\n",
+ "model_graph = tfl.estimators.get_model_graph(saved_model_path)\n",
+ "tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {
@@ -605,7 +660,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -634,7 +689,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
@@ -643,7 +698,7 @@
"outputs": [],
"source": [
"# This is Crystals ensemble model with separate calibration: model output is\n",
"# the average output of separatly calibrated lattices.\n",
"# the average output of separately calibrated lattices.\n",
"model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
" feature_configs=feature_configs,\n",
" lattices='crystals',\n",
@@ -680,7 +735,7 @@
},
{
"cell_type": "code",
"execution_count": 0,
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
2 changes: 1 addition & 1 deletion docs/tutorials/custom_estimators.ipynb
@@ -303,7 +303,7 @@
"\n",
"There are several ways to create a custom estimator. Here we will construct a `model_fn` that calls a Keras model on the parsed input tensors. To parse the input features, you can use `tf.feature_column.input_layer`, `tf.keras.layers.DenseFeatures`, or `tfl.estimators.transform_features`. If you use the latter, you will not need to wrap categorical features with dense feature columns, and the resulting tensors will not be concatenated, which makes it easier to use the features in the calibration layers.\n",
"\n",
"To construct a model, you can mix and match TFL layers or any other Keras layers. Here we create a calibrated lattice Keras model out of TFL layers and impose several monotonicity constraints. When then use the Keras model to create the custom estimator.\n"
"To construct a model, you can mix and match TFL layers or any other Keras layers. Here we create a calibrated lattice Keras model out of TFL layers and impose several monotonicity constraints. We then use the Keras model to create the custom estimator.\n"
]
},
{
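Aside from the diff: the cell above mentions building a `model_fn` that calls a Keras model on tensors parsed with `tfl.estimators.transform_features`. A minimal sketch under those assumptions — `feature_columns` and the calibrated-lattice Keras `model` are assumed to be defined as in the tutorial, and the loss/optimizer choices are placeholders:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

def model_fn(features, labels, mode, config):
    # Per-feature tensors: categorical columns need no dense wrapping and
    # the results are not concatenated, as the tutorial text notes.
    transformed = tfl.estimators.transform_features(features, feature_columns)
    inputs = [transformed[name] for name in sorted(transformed)]
    logits = model(inputs, training=(mode == tf.estimator.ModeKeys.TRAIN))
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions={'logits': logits})
    loss = tf.compat.v1.losses.sigmoid_cross_entropy(
        tf.cast(labels, tf.float32), logits)
    train_op = tf.compat.v1.train.AdamOptimizer(0.01).minimize(
        loss, global_step=tf.compat.v1.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
```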
8 changes: 4 additions & 4 deletions docs/tutorials/keras_layers.ipynb
@@ -237,7 +237,7 @@
"id": "W3DnEKWvQYXm"
},
"source": [
"We use a `tfl.layers.ParallelCombination` layer to group together calibration layers which have to be executed in paralel in order to be able to create a Sequential model.\n"
"We use a `tfl.layers.ParallelCombination` layer to group together calibration layers which have to be executed in parallel in order to be able to create a Sequential model.\n"
]
},
{
@@ -260,7 +260,7 @@
"id": "BPZsSUZiQiwc"
},
"source": [
"We create a calibration layer for each feature and add it to the parallel combination layer. For numeric features we use `tfl.layers.PWLCalibration` and for categorical features we use `tfl.layers.CategoricalCalibration`."
"We create a calibration layer for each feature and add it to the parallel combination layer. For numeric features we use `tfl.layers.PWLCalibration`, and for categorical features we use `tfl.layers.CategoricalCalibration`."
]
},
{
@@ -282,7 +282,7 @@
" training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n",
" # You need to ensure that input keypoints have same dtype as layer input.\n",
" # You can do it by setting dtype here or by providing keypoints in such\n",
" # format which will be converted to deisred tf.dtype by default.\n",
" # format which will be converted to desired tf.dtype by default.\n",
" dtype=tf.float32,\n",
" # Output range must correspond to expected lattice input range.\n",
" output_min=0.0,\n",
@@ -542,7 +542,7 @@
" training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n",
" # You need to ensure that input keypoints have same dtype as layer input.\n",
" # You can do it by setting dtype here or by providing keypoints in such\n",
" # format which will be converted to deisred tf.dtype by default.\n",
" # format which will be converted to desired tf.dtype by default.\n",
" dtype=tf.float32,\n",
" # Output range must correspond to expected lattice input range.\n",
" output_min=0.0,\n",
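Aside from the diff: a hedged sketch of the `tfl.layers.ParallelCombination` pattern the cells above describe — the keypoints, bucket count, and lattice sizes are illustrative placeholders, not values from the tutorial's dataset:

```python
import numpy as np
import tensorflow as tf
import tensorflow_lattice as tfl

# Two illustrative calibrators grouped so they run side by side as a
# single Sequential-compatible layer.
combined_calibrators = tfl.layers.ParallelCombination()
combined_calibrators.append(tfl.layers.PWLCalibration(
    input_keypoints=np.linspace(18.0, 80.0, num=5),  # placeholder 'age' range
    dtype=tf.float32,
    output_min=0.0, output_max=1.0))
combined_calibrators.append(tfl.layers.CategoricalCalibration(
    num_buckets=2,  # e.g. a binary categorical feature
    output_min=0.0, output_max=1.0))

# Calibrator outputs feed a 2-D lattice whose input range is [0, 1].
lattice = tfl.layers.Lattice(
    lattice_sizes=[2, 2],
    monotonicities=['increasing', 'increasing'],
    output_min=0.0, output_max=1.0)

model = tf.keras.models.Sequential([combined_calibrators, lattice])
model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.01))
```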