| reponame (string, 5 values) | filepath (string, 15–64 chars) | content (string, 63–24.6k chars) |
|---|---|---|
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/kubeflow_runner.py | from absl import logging
from tfx import v1 as tfx
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner as runner
from tfx.proto import trainer_pb2
from pipeline import configs, pipeline
def run():
    runner_config = runner.KubeflowV2DagRunnerConfig(default_image=configs.PIPELINE_IMAGE)
    runner.Kube... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/local_runner.py | import os
from absl import logging
from tfx import v1 as tfx
from tfx.orchestration.data_types import RuntimeParameter
from pipeline import configs
from pipeline import local_pipeline
# A TFX pipeline produces many output files and metadata. All output data will be
# stored under this OUTPUT_DIR.
# NOTE: It is recommen... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/model_analysis.ipynb | # import required libs
import glob
import os
import tensorflow as tf
import tensorflow_model_analysis as tfma
print('TF version: {}'.format(tf.version.VERSION))
print('TFMA version: {}'.format(tfma.version.VERSION_STRING))
# Read artifact information from metadata store.
import beam_dag_runner
from tfx.orchestration i... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/__init__.py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or a... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/common.py | IMAGE_KEY = "image"
IMAGE_SHAPE_KEY = "image_shape"
LABEL_KEY = "label"
LABEL_SHAPE_KEY = "label_shape"
CONCRETE_INPUT = "pixel_values"
NUM_LABELS = 3
|
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/hyperparams.py | INPUT_IMG_SIZE = 128
TRAIN_BATCH_SIZE = 64
EVAL_BATCH_SIZE = 64
EPOCHS = 10
|
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/preprocessing.py | import tensorflow as tf
from tensorflow.keras.applications import mobilenet_v2
from .utils import transformed_name
from .common import IMAGE_KEY, LABEL_KEY
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transf... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/signatures.py | from typing import Dict
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow.keras.applications import mobilenet_v2
from .utils import transformed_name
from .common import IMAGE_KEY, LABEL_KEY, CONCRETE_INPUT
from .hyperparams import INPUT_IMG_SIZE
def _serving_preprocess(string_input):
""... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/train.py | from typing import List
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor, FnArgs
from tfx_bsl.tfxio import dataset_options
from .common import IMAGE_KEY, LABEL_KEY, NUM_LABELS
from .hyperparams import EPOCHS, EVAL_BATCH_SIZE, TRAIN_BATCH_SIZE
fro... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/unet.py | import tensorflow as tf
from .hyperparams import INPUT_IMG_SIZE
"""
build_model builds a U-Net model. The implementation is
borrowed from the [TF official tutorial on Semantic Segmentation]
(https://www.tensorflow.org/tutorials/images/segmentation)
"""
def build_model(input_name, label_name, num_... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/models/utils.py | import absl
def INFO(text: str):
absl.logging.info(text)
def transformed_name(key: str) -> str:
return key + "_xf"
|
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/configs.py | import os # pylint: disable=unused-import
import tensorflow_model_analysis as tfma
import tfx.extensions.google_cloud_ai_platform.constants as vertex_const
import tfx.extensions.google_cloud_ai_platform.trainer.executor as vertex_training_const
PIPELINE_NAME = "segmentation-training-pipeline"
try:
import google... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/local_pipeline.py | from typing import Any, Dict, List, Optional, Text
from tfx import v1 as tfx
import tensorflow_model_analysis as tfma
from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
import absl
import tensorflow_model_analysis as tfma
from tfx.components import ImportExampleGen
from tfx.compon... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/pipeline.py | from typing import Any, Dict, List, Optional, Text
import tensorflow_model_analysis as tfma
from ml_metadata.proto import metadata_store_pb2
from tfx import v1 as tfx
from tfx.components import (
Evaluator,
ImportExampleGen,
StatisticsGen,
Transform,
)
from tfx.dsl.components.common import resolver
fro... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/apps/gradio/semantic_segmentation/app.py | import gradio as gr
import numpy as np
import tensorflow as tf
from huggingface_hub import from_pretrained_keras
from PIL import Image
MODEL_CKPT = "$MODEL_REPO_ID@$MODEL_VERSION"
MODEL = from_pretrained_keras(MODEL_CKPT)
RESOLTUION = 128
PETS_PALETTE = []
with open(r"./palette.txt", "r") as fp:
for line in fp:
... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/components/HFPusher/__init__.py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/components/HFPusher/component.py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/components/HFPusher/component_test.py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/components/HFPusher/executor.py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/components/HFPusher/model_card.py | from huggingface_hub import ModelCard, ModelCardData
def create_card(template_path, model_metadata, **template_kwargs):
"""Creates model card.
Args:
        template_path (str): Path to the Jinja template the model card is based on.
model_metadata (dict): Dict of card metadata.
Refer to the link to kn... |
deep-diver/semantic-segmentation-ml-pipeline | training_pipeline/pipeline/components/HFPusher/runner.py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
sayakpaul/CI-CD-for-Model-Training | cloud_build_tfx.ipynb | from google.colab import auth
auth.authenticate_user()
GOOGLE_CLOUD_PROJECT = "fast-ai-exploration"
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = "vertex-tfx-mlops"
PIPELINE_NAME = "penguin-vertex-training"
DATA_ROOT = "gs://{}/data/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
MODULE_ROOT = "gs://{}/pipeline_modu... |
sayakpaul/CI-CD-for-Model-Training | cloud_function_trigger.ipynb | from google.colab import auth
auth.authenticate_user()
GOOGLE_CLOUD_PROJECT = "fast-ai-exploration"
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = "vertex-tfx-mlops"
PIPELINE_NAME = "penguin-vertex-training"
PIPELINE_ROOT = "gs://{}/pipeline_root/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
PIPELINE_LOCATION = f"{... |
sayakpaul/CI-CD-for-Model-Training | cloud_scheduler_trigger.ipynb | # only need if you are using Colab
from google.colab import auth
auth.authenticate_user()
GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"
GOOGLE_CLOUD_REGION = "us-central1"
PIPELINE_NAME = "penguin-vertex-training"
PUBSUB_TOPIC = f"trigger-{PIPELINE_NAME}"
SCHEDULER_JOB_NAME = "MLOpsJob"
import json
data = '{"num_epochs": "3",... |
sayakpaul/CI-CD-for-Model-Training | build/compile_pipeline.py | import argparse
from absl import logging
from create_pipeline import create_pipeline
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner
import os
import sys
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)... |
sayakpaul/CI-CD-for-Model-Training | build/create_pipeline.py | from tfx.orchestration import data_types
from tfx import v1 as tfx
import os
import sys
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
)
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, "..")))
from utils import config, custom_components
def creat... |
sayakpaul/CI-CD-for-Model-Training | build/penguin_trainer.py | # Copied from https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple and
# slightly modified run_fn() to add distribution_strategy.
from typing import List
from absl import logging
import tensorflow as tf
from tensorflow import keras
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_transform.tf... |
sayakpaul/CI-CD-for-Model-Training | cloud_function/main.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, ... |
sayakpaul/CI-CD-for-Model-Training | utils/config.py | import os
# GCP
GCP_PROJECT = os.getenv("PROJECT")
GCP_REGION = os.getenv("REGION")
# Data
DATA_ROOT = os.getenv("DATA_ROOT")
# Training and serving
TFX_IMAGE_URI = os.getenv("TFX_IMAGE_URI")
MODULE_ROOT = os.getenv("MODULE_ROOT")
MODULE_FILE = os.path.join(MODULE_ROOT, "penguin_trainer.py")
SERVING_MODEL_DIR = os.g... |
sayakpaul/CI-CD-for-Model-Training | utils/custom_components.py | """
Taken from:
* https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai/blob/main/src/tfx_pipelines/components.py#L51
"""
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import (
InputArtifact,
OutputArtifact,
Parameter,
)
from tfx.t... |
sayakpaul/Dual-Deployments-on-Vertex-AI | custom_components/firebase_publisher.py | """
Custom TFX component for Firebase upload.
Author: Chansung Park
"""
from tfx import types
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx import v1 as tfx
from absl import logging
import firebase_admin
from firebase_admin im... |
sayakpaul/Dual-Deployments-on-Vertex-AI | custom_components/flower_densenet_trainer.py | from typing import List
from absl import logging
from tensorflow import keras
from tfx import v1 as tfx
import tensorflow as tf
_IMAGE_FEATURES = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
"one_hot_class": tf.io.VarLenFeature(tf.float32),
}
_CONCRETE_IN... |
sayakpaul/Dual-Deployments-on-Vertex-AI | custom_components/flower_mobilenet_trainer.py | from typing import List
from absl import logging
from tensorflow import keras
from tfx import v1 as tfx
import tensorflow as tf
_IMAGE_FEATURES = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
"one_hot_class": tf.io.VarLenFeature(tf.float32),
}
_INPUT_SHAPE... |
sayakpaul/Dual-Deployments-on-Vertex-AI | custom_components/vertex_deployer.py | """
Custom TFX component for deploying a model to a Vertex AI Endpoint.
Author: Sayak Paul
Reference: https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai/blob/main/build/utils.py#L97
"""
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Param... |
sayakpaul/Dual-Deployments-on-Vertex-AI | custom_components/vertex_uploader.py | """
Custom TFX component for importing a model into Vertex AI.
Author: Sayak Paul
Reference: https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai/blob/main/src/tfx_pipelines/components.py#L74
"""
import os
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.comp... |
sayakpaul/Dual-Deployments-on-Vertex-AI | notebooks/Custom_Model_TFX.ipynb | from google.colab import auth
auth.authenticate_user()
import tensorflow as tf
print('TensorFlow version: {}'.format(tf.__version__))
from tfx import v1 as tfx
print('TFX version: {}'.format(tfx.__version__))
import kfp
print('KFP version: {}'.format(kfp.__version__))
from google.cloud import aiplatform as vertex_ai
im... |
sayakpaul/Dual-Deployments-on-Vertex-AI | notebooks/Dataset_Prep.ipynb | #@title GCS
#@markdown You should change these values as per your preferences. The copy operation can take ~5 minutes.
BUCKET_PATH = "gs://flowers-experimental" #@param {type:"string"}
REGION = "us-central1" #@param {type:"string"}
!gsutil mb -l {REGION} {BUCKET_PATH}
!gsutil -m cp -r flower_photos {BUCKET_PATH}
impor... |
sayakpaul/Dual-Deployments-on-Vertex-AI | notebooks/Dual_Deployments_With_AutoML.ipynb | import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
    USER_FLAG = "--user"
# Automatically re... |
sayakpaul/Dual-Deployments-on-Vertex-AI | notebooks/Model_Tests.ipynb | from io import BytesIO
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import requests
import base64
from google.cloud.aiplatform.gapic.schema import predict
from google.cloud import aiplatform
import tensorflow as tf
def preprocess_image(image):
"""Preprocesses an image."""
image = np.... |
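As an illustration of how the complete excerpts above fit together, here is a minimal sketch of a `tf.Transform` `preprocessing_fn` built from the constants in `training_pipeline/models/common.py` and the `transformed_name` helper in `training_pipeline/models/utils.py`. The MobileNetV2 scaling step is an assumption suggested by the `mobilenet_v2` import in `preprocessing.py`; this is not the repository's verbatim code.

```python
# Minimal sketch, not the repository's verbatim preprocessing.py.
import tensorflow as tf
from tensorflow.keras.applications import mobilenet_v2

IMAGE_KEY = "image"   # from models/common.py
LABEL_KEY = "label"   # from models/common.py


def transformed_name(key: str) -> str:
    # From models/utils.py: TFX Transform outputs carry the "_xf" suffix.
    return key + "_xf"


def preprocessing_fn(inputs):
    """tf.Transform callback mapping raw features to transformed features."""
    outputs = {}
    # Assumed step: scale raw pixels to the [-1, 1] range MobileNetV2 expects.
    outputs[transformed_name(IMAGE_KEY)] = mobilenet_v2.preprocess_input(
        tf.cast(inputs[IMAGE_KEY], tf.float32)
    )
    # Labels are passed through unchanged.
    outputs[transformed_name(LABEL_KEY)] = inputs[LABEL_KEY]
    return outputs
```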