abacusai
========

.. py:module:: abacusai


Submodules
----------

.. toctree::
   :maxdepth: 1

   /autoapi/abacusai/abacus_api/index
   /autoapi/abacusai/address/index
   /autoapi/abacusai/agent/index
   /autoapi/abacusai/agent_chat_message/index
   /autoapi/abacusai/agent_conversation/index
   /autoapi/abacusai/agent_data_document_info/index
   /autoapi/abacusai/agent_data_execution_result/index
   /autoapi/abacusai/agent_version/index
   /autoapi/abacusai/ai_building_task/index
   /autoapi/abacusai/algorithm/index
   /autoapi/abacusai/annotation/index
   /autoapi/abacusai/annotation_config/index
   /autoapi/abacusai/annotation_document/index
   /autoapi/abacusai/annotation_entry/index
   /autoapi/abacusai/annotations_status/index
   /autoapi/abacusai/api_class/index
   /autoapi/abacusai/api_client_utils/index
   /autoapi/abacusai/api_endpoint/index
   /autoapi/abacusai/api_key/index
   /autoapi/abacusai/app_user_group/index
   /autoapi/abacusai/app_user_group_sign_in_token/index
   /autoapi/abacusai/application_connector/index
   /autoapi/abacusai/audio_gen_settings/index
   /autoapi/abacusai/batch_prediction/index
   /autoapi/abacusai/batch_prediction_version/index
   /autoapi/abacusai/batch_prediction_version_logs/index
   /autoapi/abacusai/bot_info/index
   /autoapi/abacusai/categorical_range_violation/index
   /autoapi/abacusai/chat_message/index
   /autoapi/abacusai/chat_session/index
   /autoapi/abacusai/chatllm_computer/index
   /autoapi/abacusai/chatllm_project/index
   /autoapi/abacusai/chatllm_referral_invite/index
   /autoapi/abacusai/chatllm_task/index
   /autoapi/abacusai/client/index
   /autoapi/abacusai/code_agent_response/index
   /autoapi/abacusai/code_autocomplete_response/index
   /autoapi/abacusai/code_bot/index
   /autoapi/abacusai/code_edit/index
   /autoapi/abacusai/code_edit_response/index
   /autoapi/abacusai/code_edits/index
   /autoapi/abacusai/code_source/index
   /autoapi/abacusai/code_suggestion_validation_response/index
   /autoapi/abacusai/compute_point_info/index
   /autoapi/abacusai/concatenation_config/index
   /autoapi/abacusai/constants_autocomplete_response/index
   /autoapi/abacusai/cpu_gpu_memory_specs/index
   /autoapi/abacusai/cryptography/index
   /autoapi/abacusai/custom_chat_instructions/index
   /autoapi/abacusai/custom_loss_function/index
   /autoapi/abacusai/custom_metric/index
   /autoapi/abacusai/custom_metric_version/index
   /autoapi/abacusai/custom_train_function_info/index
   /autoapi/abacusai/data_consistency_duplication/index
   /autoapi/abacusai/data_metrics/index
   /autoapi/abacusai/data_prep_logs/index
   /autoapi/abacusai/data_quality_results/index
   /autoapi/abacusai/data_upload_result/index
   /autoapi/abacusai/database_column_feature_mapping/index
   /autoapi/abacusai/database_connector/index
   /autoapi/abacusai/database_connector_column/index
   /autoapi/abacusai/database_connector_schema/index
   /autoapi/abacusai/dataset/index
   /autoapi/abacusai/dataset_column/index
   /autoapi/abacusai/dataset_version/index
   /autoapi/abacusai/dataset_version_logs/index
   /autoapi/abacusai/deployment/index
   /autoapi/abacusai/deployment_auth_token/index
   /autoapi/abacusai/deployment_conversation/index
   /autoapi/abacusai/deployment_conversation_event/index
   /autoapi/abacusai/deployment_conversation_export/index
   /autoapi/abacusai/deployment_statistics/index
   /autoapi/abacusai/document_data/index
   /autoapi/abacusai/document_retriever/index
   /autoapi/abacusai/document_retriever_lookup_result/index
   /autoapi/abacusai/document_retriever_version/index
   /autoapi/abacusai/drift_distribution/index
   /autoapi/abacusai/drift_distributions/index
   /autoapi/abacusai/eda/index
   /autoapi/abacusai/eda_chart_description/index
   /autoapi/abacusai/eda_collinearity/index
   /autoapi/abacusai/eda_data_consistency/index
   /autoapi/abacusai/eda_feature_association/index
   /autoapi/abacusai/eda_feature_collinearity/index
   /autoapi/abacusai/eda_forecasting_analysis/index
   /autoapi/abacusai/eda_version/index
   /autoapi/abacusai/edit_image_models/index
   /autoapi/abacusai/embedding_feature_drift_distribution/index
   /autoapi/abacusai/execute_feature_group_operation/index
   /autoapi/abacusai/external_application/index
   /autoapi/abacusai/external_invite/index
   /autoapi/abacusai/extracted_fields/index
   /autoapi/abacusai/feature/index
   /autoapi/abacusai/feature_distribution/index
   /autoapi/abacusai/feature_drift_record/index
   /autoapi/abacusai/feature_drift_summary/index
   /autoapi/abacusai/feature_group/index
   /autoapi/abacusai/feature_group_document/index
   /autoapi/abacusai/feature_group_export/index
   /autoapi/abacusai/feature_group_export_config/index
   /autoapi/abacusai/feature_group_export_download_url/index
   /autoapi/abacusai/feature_group_lineage/index
   /autoapi/abacusai/feature_group_refresh_export_config/index
   /autoapi/abacusai/feature_group_row/index
   /autoapi/abacusai/feature_group_row_process/index
   /autoapi/abacusai/feature_group_row_process_logs/index
   /autoapi/abacusai/feature_group_row_process_summary/index
   /autoapi/abacusai/feature_group_template/index
   /autoapi/abacusai/feature_group_template_variable_options/index
   /autoapi/abacusai/feature_group_version/index
   /autoapi/abacusai/feature_group_version_logs/index
   /autoapi/abacusai/feature_importance/index
   /autoapi/abacusai/feature_mapping/index
   /autoapi/abacusai/feature_performance_analysis/index
   /autoapi/abacusai/file_connector/index
   /autoapi/abacusai/file_connector_instructions/index
   /autoapi/abacusai/file_connector_verification/index
   /autoapi/abacusai/finetuned_pretrained_model/index
   /autoapi/abacusai/forecasting_analysis_graph_data/index
   /autoapi/abacusai/forecasting_monitor_item_analysis/index
   /autoapi/abacusai/forecasting_monitor_summary/index
   /autoapi/abacusai/fs_entry/index
   /autoapi/abacusai/function_logs/index
   /autoapi/abacusai/generated_pit_feature_config_option/index
   /autoapi/abacusai/global_context/index
   /autoapi/abacusai/graph_dashboard/index
   /autoapi/abacusai/holdout_analysis/index
   /autoapi/abacusai/holdout_analysis_version/index
   /autoapi/abacusai/hosted_app/index
   /autoapi/abacusai/hosted_app_container/index
   /autoapi/abacusai/hosted_app_file_read/index
   /autoapi/abacusai/hosted_model_token/index
   /autoapi/abacusai/hume_voice/index
   /autoapi/abacusai/image_gen_settings/index
   /autoapi/abacusai/indexing_config/index
   /autoapi/abacusai/inferred_database_column_to_feature_mappings/index
   /autoapi/abacusai/inferred_feature_mappings/index
   /autoapi/abacusai/item_statistics/index
   /autoapi/abacusai/llm_app/index
   /autoapi/abacusai/llm_code_block/index
   /autoapi/abacusai/llm_execution_preview/index
   /autoapi/abacusai/llm_execution_result/index
   /autoapi/abacusai/llm_generated_code/index
   /autoapi/abacusai/llm_input/index
   /autoapi/abacusai/llm_parameters/index
   /autoapi/abacusai/llm_response/index
   /autoapi/abacusai/memory_options/index
   /autoapi/abacusai/messaging_connector_response/index
   /autoapi/abacusai/model/index
   /autoapi/abacusai/model_artifacts_export/index
   /autoapi/abacusai/model_blueprint_export/index
   /autoapi/abacusai/model_blueprint_stage/index
   /autoapi/abacusai/model_location/index
   /autoapi/abacusai/model_metrics/index
   /autoapi/abacusai/model_monitor/index
   /autoapi/abacusai/model_monitor_org_summary/index
   /autoapi/abacusai/model_monitor_summary/index
   /autoapi/abacusai/model_monitor_summary_from_org/index
   /autoapi/abacusai/model_monitor_version/index
   /autoapi/abacusai/model_monitor_version_metric_data/index
   /autoapi/abacusai/model_training_type_for_deployment/index
   /autoapi/abacusai/model_upload/index
   /autoapi/abacusai/model_version/index
   /autoapi/abacusai/model_version_feature_group_schema/index
   /autoapi/abacusai/modification_lock_info/index
   /autoapi/abacusai/module/index
   /autoapi/abacusai/monitor_alert/index
   /autoapi/abacusai/monitor_alert_version/index
   /autoapi/abacusai/monitor_drift_and_distributions/index
   /autoapi/abacusai/natural_language_explanation/index
   /autoapi/abacusai/nested_feature/index
   /autoapi/abacusai/nested_feature_schema/index
   /autoapi/abacusai/news_search_result/index
   /autoapi/abacusai/nlp_chat_response/index
   /autoapi/abacusai/null_violation/index
   /autoapi/abacusai/organization_external_application_settings/index
   /autoapi/abacusai/organization_group/index
   /autoapi/abacusai/organization_search_result/index
   /autoapi/abacusai/organization_secret/index
   /autoapi/abacusai/page_data/index
   /autoapi/abacusai/pipeline/index
   /autoapi/abacusai/pipeline_reference/index
   /autoapi/abacusai/pipeline_step/index
   /autoapi/abacusai/pipeline_step_version/index
   /autoapi/abacusai/pipeline_step_version_logs/index
   /autoapi/abacusai/pipeline_step_version_reference/index
   /autoapi/abacusai/pipeline_version/index
   /autoapi/abacusai/pipeline_version_logs/index
   /autoapi/abacusai/playground_text/index
   /autoapi/abacusai/point_in_time_feature/index
   /autoapi/abacusai/point_in_time_feature_info/index
   /autoapi/abacusai/point_in_time_group/index
   /autoapi/abacusai/point_in_time_group_feature/index
   /autoapi/abacusai/prediction_client/index
   /autoapi/abacusai/prediction_dataset/index
   /autoapi/abacusai/prediction_feature_group/index
   /autoapi/abacusai/prediction_input/index
   /autoapi/abacusai/prediction_log_record/index
   /autoapi/abacusai/prediction_operator/index
   /autoapi/abacusai/prediction_operator_version/index
   /autoapi/abacusai/problem_type/index
   /autoapi/abacusai/project/index
   /autoapi/abacusai/project_config/index
   /autoapi/abacusai/project_feature_group/index
   /autoapi/abacusai/project_feature_group_schema/index
   /autoapi/abacusai/project_feature_group_schema_version/index
   /autoapi/abacusai/project_validation/index
   /autoapi/abacusai/python_function/index
   /autoapi/abacusai/python_function_validator/index
   /autoapi/abacusai/python_plot_function/index
   /autoapi/abacusai/range_violation/index
   /autoapi/abacusai/realtime_monitor/index
   /autoapi/abacusai/refresh_pipeline_run/index
   /autoapi/abacusai/refresh_policy/index
   /autoapi/abacusai/refresh_schedule/index
   /autoapi/abacusai/regenerate_llm_external_application/index
   /autoapi/abacusai/resolved_feature_group_template/index
   /autoapi/abacusai/return_class/index
   /autoapi/abacusai/routing_action/index
   /autoapi/abacusai/schema/index
   /autoapi/abacusai/sftp_key/index
   /autoapi/abacusai/streaming_auth_token/index
   /autoapi/abacusai/streaming_client/index
   /autoapi/abacusai/streaming_connector/index
   /autoapi/abacusai/streaming_row_count/index
   /autoapi/abacusai/streaming_sample_code/index
   /autoapi/abacusai/template_node_details/index
   /autoapi/abacusai/test_point_predictions/index
   /autoapi/abacusai/tone_details/index
   /autoapi/abacusai/training_config_options/index
   /autoapi/abacusai/twitter_search_result/index
   /autoapi/abacusai/upload/index
   /autoapi/abacusai/upload_part/index
   /autoapi/abacusai/use_case/index
   /autoapi/abacusai/use_case_requirements/index
   /autoapi/abacusai/user/index
   /autoapi/abacusai/user_exception/index
   /autoapi/abacusai/video_gen_settings/index
   /autoapi/abacusai/video_search_result/index
   /autoapi/abacusai/voice_gen_details/index
   /autoapi/abacusai/web_page_response/index
   /autoapi/abacusai/web_search_response/index
   /autoapi/abacusai/web_search_result/index
   /autoapi/abacusai/webhook/index
   /autoapi/abacusai/workflow_graph_node_details/index
   /autoapi/abacusai/workflow_node_template/index


Attributes
----------

.. autoapisummary::

   abacusai.DocumentRetrieverConfig
   abacusai.Segment
   abacusai._request_context
   abacusai.__version__


Exceptions
----------

.. autoapisummary::

   abacusai.ApiException


Classes
-------

.. autoapisummary::

   abacusai.AbacusApi
   abacusai.Address
   abacusai.Agent
   abacusai.AgentChatMessage
   abacusai.AgentConversation
   abacusai.AgentDataDocumentInfo
   abacusai.AgentDataExecutionResult
   abacusai.AgentVersion
   abacusai.AiBuildingTask
   abacusai.Algorithm
   abacusai.Annotation
   abacusai.AnnotationConfig
   abacusai.AnnotationDocument
   abacusai.AnnotationEntry
   abacusai.AnnotationsStatus
   abacusai.ApiClass
   abacusai.FieldDescriptor
   abacusai.JSONSchema
   abacusai.WorkflowNodeInputMapping
   abacusai.WorkflowNodeInputSchema
   abacusai.WorkflowNodeOutputMapping
   abacusai.WorkflowNodeOutputSchema
   abacusai.TriggerConfig
   abacusai.WorkflowGraphNode
   abacusai.WorkflowGraphEdge
   abacusai.WorkflowGraph
   abacusai.AgentConversationMessage
   abacusai.WorkflowNodeTemplateConfig
   abacusai.WorkflowNodeTemplateInput
   abacusai.WorkflowNodeTemplateOutput
   abacusai.HotkeyPrompt
   abacusai._ApiClassFactory
   abacusai.BatchPredictionArgs
   abacusai.ForecastingBatchPredictionArgs
   abacusai.NamedEntityExtractionBatchPredictionArgs
   abacusai.PersonalizationBatchPredictionArgs
   abacusai.PredictiveModelingBatchPredictionArgs
   abacusai.PretrainedModelsBatchPredictionArgs
   abacusai.SentenceBoundaryDetectionBatchPredictionArgs
   abacusai.ThemeAnalysisBatchPredictionArgs
   abacusai.ChatLLMBatchPredictionArgs
   abacusai.TrainablePlugAndPlayBatchPredictionArgs
   abacusai.AIAgentBatchPredictionArgs
   abacusai._BatchPredictionArgsFactory
   abacusai.Blob
   abacusai.BlobInput
   abacusai.DatasetConfig
   abacusai.StreamingConnectorDatasetConfig
   abacusai.KafkaDatasetConfig
   abacusai._StreamingConnectorDatasetConfigFactory
   abacusai.DocumentType
   abacusai.OcrMode
   abacusai.ParsingConfig
   abacusai.DocumentProcessingConfig
   abacusai.DatasetDocumentProcessingConfig
   abacusai.IncrementalDatabaseConnectorConfig
   abacusai.AttachmentParsingConfig
   abacusai.ApplicationConnectorDatasetConfig
   abacusai.ConfluenceDatasetConfig
   abacusai.BoxDatasetConfig
   abacusai.GoogleAnalyticsDatasetConfig
   abacusai.GoogleDriveDatasetConfig
   abacusai.JiraDatasetConfig
   abacusai.OneDriveDatasetConfig
   abacusai.SharepointDatasetConfig
   abacusai.ZendeskDatasetConfig
   abacusai.AbacusUsageMetricsDatasetConfig
   abacusai.TeamsScraperDatasetConfig
   abacusai.FreshserviceDatasetConfig
   abacusai.SftpDatasetConfig
   abacusai._ApplicationConnectorDatasetConfigFactory
   abacusai.PredictionArguments
   abacusai.OptimizationPredictionArguments
   abacusai.TimeseriesAnomalyPredictionArguments
   abacusai.ChatLLMPredictionArguments
   abacusai.RegressionPredictionArguments
   abacusai.ForecastingPredictionArguments
   abacusai.CumulativeForecastingPredictionArguments
   abacusai.NaturalLanguageSearchPredictionArguments
   abacusai.FeatureStorePredictionArguments
   abacusai._PredictionArgumentsFactory
   abacusai.VectorStoreTextEncoder
   abacusai.VectorStoreConfig
   abacusai.ApiEnum
   abacusai.ProblemType
   abacusai.RegressionObjective
   abacusai.RegressionTreeHPOMode
   abacusai.PartialDependenceAnalysis
   abacusai.RegressionAugmentationStrategy
   abacusai.RegressionTargetTransform
   abacusai.RegressionTypeOfSplit
   abacusai.RegressionTimeSplitMethod
   abacusai.RegressionLossFunction
   abacusai.ExplainerType
   abacusai.SamplingMethodType
   abacusai.MergeMode
   abacusai.OperatorType
   abacusai.MarkdownOperatorInputType
   abacusai.FillLogic
   abacusai.BatchSize
   abacusai.HolidayCalendars
   abacusai.FileFormat
   abacusai.ExperimentationMode
   abacusai.PersonalizationTrainingMode
   abacusai.PersonalizationObjective
   abacusai.ForecastingObjective
   abacusai.ForecastingFrequency
   abacusai.ForecastingDataSplitType
   abacusai.ForecastingLossFunction
   abacusai.ForecastingLocalScaling
   abacusai.ForecastingFillMethod
   abacusai.ForecastingQuanitlesExtensionMethod
   abacusai.TimeseriesAnomalyDataSplitType
   abacusai.TimeseriesAnomalyTypeOfAnomaly
   abacusai.TimeseriesAnomalyUseHeuristic
   abacusai.NERObjective
   abacusai.NERModelType
   abacusai.NLPDocumentFormat
   abacusai.SentimentType
   abacusai.ClusteringImputationMethod
   abacusai.ConnectorType
   abacusai.ApplicationConnectorType
   abacusai.StreamingConnectorType
   abacusai.PythonFunctionArgumentType
   abacusai.PythonFunctionOutputArgumentType
   abacusai.LLMName
   abacusai.MonitorAlertType
   abacusai.FeatureDriftType
   abacusai.DataIntegrityViolationType
   abacusai.BiasType
   abacusai.AlertActionType
   abacusai.PythonFunctionType
   abacusai.EvalArtifactType
   abacusai.FieldDescriptorType
   abacusai.WorkflowNodeInputType
   abacusai.WorkflowNodeOutputType
   abacusai.StdDevThresholdType
   abacusai.DataType
   abacusai.AgentInterface
   abacusai.WorkflowNodeTemplateType
   abacusai.ProjectConfigType
   abacusai.CPUSize
   abacusai.MemorySize
   abacusai.ResponseSectionType
   abacusai.CodeLanguage
   abacusai.DeploymentConversationType
   abacusai.AgentClientType
   abacusai.SamplingConfig
   abacusai.NSamplingConfig
   abacusai.PercentSamplingConfig
   abacusai._SamplingConfigFactory
   abacusai.MergeConfig
   abacusai.LastNMergeConfig
   abacusai.TimeWindowMergeConfig
   abacusai._MergeConfigFactory
   abacusai.OperatorConfig
   abacusai.UnpivotConfig
   abacusai.MarkdownConfig
   abacusai.CrawlerTransformConfig
   abacusai.ExtractDocumentDataConfig
   abacusai.DataGenerationConfig
   abacusai.UnionTransformConfig
   abacusai._OperatorConfigFactory
   abacusai.TrainingConfig
   abacusai.PersonalizationTrainingConfig
   abacusai.RegressionTrainingConfig
   abacusai.ForecastingTrainingConfig
   abacusai.NamedEntityExtractionTrainingConfig
   abacusai.NaturalLanguageSearchTrainingConfig
   abacusai.ChatLLMTrainingConfig
   abacusai.SentenceBoundaryDetectionTrainingConfig
   abacusai.SentimentDetectionTrainingConfig
   abacusai.DocumentClassificationTrainingConfig
   abacusai.DocumentSummarizationTrainingConfig
   abacusai.DocumentVisualizationTrainingConfig
   abacusai.ClusteringTrainingConfig
   abacusai.ClusteringTimeseriesTrainingConfig
   abacusai.EventAnomalyTrainingConfig
   abacusai.TimeseriesAnomalyTrainingConfig
   abacusai.CumulativeForecastingTrainingConfig
   abacusai.ThemeAnalysisTrainingConfig
   abacusai.AIAgentTrainingConfig
   abacusai.CustomTrainedModelTrainingConfig
   abacusai.CustomAlgorithmTrainingConfig
   abacusai.OptimizationTrainingConfig
   abacusai._TrainingConfigFactory
   abacusai.DeployableAlgorithm
   abacusai.TimeWindowConfig
   abacusai.ForecastingMonitorConfig
   abacusai.StdDevThreshold
   abacusai.ItemAttributesStdDevThreshold
   abacusai.RestrictFeatureMappings
   abacusai.MonitorFilteringConfig
   abacusai.AlertConditionConfig
   abacusai.AccuracyBelowThresholdConditionConfig
   abacusai.FeatureDriftConditionConfig
   abacusai.TargetDriftConditionConfig
   abacusai.HistoryLengthDriftConditionConfig
   abacusai.DataIntegrityViolationConditionConfig
   abacusai.BiasViolationConditionConfig
   abacusai.PredictionCountConditionConfig
   abacusai._AlertConditionConfigFactory
   abacusai.AlertActionConfig
   abacusai.EmailActionConfig
   abacusai._AlertActionConfigFactory
   abacusai.MonitorThresholdConfig
   abacusai.FeatureMappingConfig
   abacusai.ProjectFeatureGroupTypeMappingsConfig
   abacusai.ConstraintConfig
   abacusai.ProjectFeatureGroupConfig
   abacusai.ConstraintProjectFeatureGroupConfig
   abacusai.ReviewModeProjectFeatureGroupConfig
   abacusai._ProjectFeatureGroupConfigFactory
   abacusai.PythonFunctionArgument
   abacusai.OutputVariableMapping
   abacusai.FeatureGroupExportConfig
   abacusai.FileConnectorExportConfig
   abacusai.DatabaseConnectorExportConfig
   abacusai._FeatureGroupExportConfigFactory
   abacusai.ResponseSection
   abacusai.AgentFlowButtonResponseSection
   abacusai.ImageUrlResponseSection
   abacusai.TextResponseSection
   abacusai.RuntimeSchemaResponseSection
   abacusai.CodeResponseSection
   abacusai.Base64ImageResponseSection
   abacusai.CollapseResponseSection
   abacusai.ListResponseSection
   abacusai.ChartResponseSection
   abacusai.DataframeResponseSection
   abacusai.ApiEndpoint
   abacusai.ApiKey
   abacusai.AppUserGroup
   abacusai.AppUserGroupSignInToken
   abacusai.ApplicationConnector
   abacusai.AudioGenSettings
   abacusai.BatchPrediction
   abacusai.BatchPredictionVersion
   abacusai.BatchPredictionVersionLogs
   abacusai.BotInfo
   abacusai.CategoricalRangeViolation
   abacusai.ChatMessage
   abacusai.ChatSession
   abacusai.ChatllmComputer
   abacusai.ChatllmProject
   abacusai.ChatllmReferralInvite
   abacusai.ChatllmTask
   abacusai.AgentResponse
   abacusai.ApiClient
   abacusai.ClientOptions
   abacusai.ReadOnlyClient
   abacusai.CodeAgentResponse
   abacusai.CodeAutocompleteResponse
   abacusai.CodeBot
   abacusai.CodeEdit
   abacusai.CodeEditResponse
   abacusai.CodeEdits
   abacusai.CodeSource
   abacusai.CodeSuggestionValidationResponse
   abacusai.ComputePointInfo
   abacusai.ConcatenationConfig
   abacusai.ConstantsAutocompleteResponse
   abacusai.CpuGpuMemorySpecs
   abacusai.CustomChatInstructions
   abacusai.CustomLossFunction
   abacusai.CustomMetric
   abacusai.CustomMetricVersion
   abacusai.CustomTrainFunctionInfo
   abacusai.DataConsistencyDuplication
   abacusai.DataMetrics
   abacusai.DataPrepLogs
   abacusai.DataQualityResults
   abacusai.DataUploadResult
   abacusai.DatabaseColumnFeatureMapping
   abacusai.DatabaseConnector
   abacusai.DatabaseConnectorColumn
   abacusai.DatabaseConnectorSchema
   abacusai.Dataset
   abacusai.DatasetColumn
   abacusai.DatasetVersion
   abacusai.DatasetVersionLogs
   abacusai.Deployment
   abacusai.DeploymentAuthToken
   abacusai.DeploymentConversation
   abacusai.DeploymentConversationEvent
   abacusai.DeploymentConversationExport
   abacusai.DeploymentStatistics
   abacusai.DocumentData
   abacusai.DocumentRetriever
   abacusai.DocumentRetrieverLookupResult
   abacusai.DocumentRetrieverVersion
   abacusai.DriftDistribution
   abacusai.DriftDistributions
   abacusai.Eda
   abacusai.EdaChartDescription
   abacusai.EdaCollinearity
   abacusai.EdaDataConsistency
   abacusai.EdaFeatureAssociation
   abacusai.EdaFeatureCollinearity
   abacusai.EdaForecastingAnalysis
   abacusai.EdaVersion
   abacusai.EditImageModels
   abacusai.EmbeddingFeatureDriftDistribution
   abacusai.ExecuteFeatureGroupOperation
   abacusai.ExternalApplication
   abacusai.ExternalInvite
   abacusai.ExtractedFields
   abacusai.Feature
   abacusai.FeatureDistribution
   abacusai.FeatureDriftRecord
   abacusai.FeatureDriftSummary
   abacusai.FeatureGroup
   abacusai.FeatureGroupDocument
   abacusai.FeatureGroupExport
   abacusai.FeatureGroupExportConfig
   abacusai.FeatureGroupExportDownloadUrl
   abacusai.FeatureGroupLineage
   abacusai.FeatureGroupRefreshExportConfig
   abacusai.FeatureGroupRow
   abacusai.FeatureGroupRowProcess
   abacusai.FeatureGroupRowProcessLogs
   abacusai.FeatureGroupRowProcessSummary
   abacusai.FeatureGroupTemplate
   abacusai.FeatureGroupTemplateVariableOptions
   abacusai.FeatureGroupVersion
   abacusai.FeatureGroupVersionLogs
   abacusai.FeatureImportance
   abacusai.FeatureMapping
   abacusai.FeaturePerformanceAnalysis
   abacusai.FileConnector
   abacusai.FileConnectorInstructions
   abacusai.FileConnectorVerification
   abacusai.FinetunedPretrainedModel
   abacusai.ForecastingAnalysisGraphData
   abacusai.ForecastingMonitorItemAnalysis
   abacusai.ForecastingMonitorSummary
   abacusai.FsEntry
   abacusai.FunctionLogs
   abacusai.GeneratedPitFeatureConfigOption
   abacusai.GraphDashboard
   abacusai.HoldoutAnalysis
   abacusai.HoldoutAnalysisVersion
   abacusai.HostedApp
   abacusai.HostedAppContainer
   abacusai.HostedAppFileRead
   abacusai.HostedModelToken
   abacusai.HumeVoice
   abacusai.ImageGenSettings
   abacusai.IndexingConfig
   abacusai.InferredDatabaseColumnToFeatureMappings
   abacusai.InferredFeatureMappings
   abacusai.ItemStatistics
   abacusai.LlmApp
   abacusai.LlmCodeBlock
   abacusai.LlmExecutionPreview
   abacusai.LlmExecutionResult
   abacusai.LlmGeneratedCode
   abacusai.LlmInput
   abacusai.LlmParameters
   abacusai.LlmResponse
   abacusai.MemoryOptions
   abacusai.MessagingConnectorResponse
   abacusai.Model
   abacusai.ModelArtifactsExport
   abacusai.ModelBlueprintExport
   abacusai.ModelBlueprintStage
   abacusai.ModelLocation
   abacusai.ModelMetrics
   abacusai.ModelMonitor
   abacusai.ModelMonitorOrgSummary
   abacusai.ModelMonitorSummary
   abacusai.ModelMonitorSummaryFromOrg
   abacusai.ModelMonitorVersion
   abacusai.ModelMonitorVersionMetricData
   abacusai.ModelTrainingTypeForDeployment
   abacusai.ModelUpload
   abacusai.ModelVersion
   abacusai.ModelVersionFeatureGroupSchema
   abacusai.ModificationLockInfo
   abacusai.Module
   abacusai.MonitorAlert
   abacusai.MonitorAlertVersion
   abacusai.MonitorDriftAndDistributions
   abacusai.NaturalLanguageExplanation
   abacusai.NestedFeature
   abacusai.NestedFeatureSchema
   abacusai.NewsSearchResult
   abacusai.NlpChatResponse
   abacusai.NullViolation
   abacusai.OrganizationExternalApplicationSettings
   abacusai.OrganizationGroup
   abacusai.OrganizationSearchResult
   abacusai.OrganizationSecret
   abacusai.PageData
   abacusai.Pipeline
   abacusai.PipelineReference
   abacusai.PipelineStep
   abacusai.PipelineStepVersion
   abacusai.PipelineStepVersionLogs
   abacusai.PipelineStepVersionReference
   abacusai.PipelineVersion
   abacusai.PipelineVersionLogs
   abacusai.PlaygroundText
   abacusai.PointInTimeFeature
   abacusai.PointInTimeFeatureInfo
   abacusai.PointInTimeGroup
   abacusai.PointInTimeGroupFeature
   abacusai.PredictionClient
   abacusai.PredictionDataset
   abacusai.PredictionFeatureGroup
   abacusai.PredictionInput
   abacusai.PredictionLogRecord
   abacusai.PredictionOperator
   abacusai.PredictionOperatorVersion
   abacusai.ProblemType
   abacusai.Project
   abacusai.ProjectConfig
   abacusai.ProjectFeatureGroup
   abacusai.ProjectFeatureGroupSchema
   abacusai.ProjectFeatureGroupSchemaVersion
   abacusai.ProjectValidation
   abacusai.PythonFunction
   abacusai.PythonPlotFunction
   abacusai.RangeViolation
   abacusai.RealtimeMonitor
   abacusai.RefreshPipelineRun
   abacusai.RefreshPolicy
   abacusai.RefreshSchedule
   abacusai.RegenerateLlmExternalApplication
   abacusai.ResolvedFeatureGroupTemplate
   abacusai.RoutingAction
   abacusai.Schema
   abacusai.SftpKey
   abacusai.StreamingAuthToken
   abacusai.StreamingClient
   abacusai.StreamingConnector
   abacusai.StreamingRowCount
   abacusai.StreamingSampleCode
   abacusai.TemplateNodeDetails
   abacusai.TestPointPredictions
   abacusai.ToneDetails
   abacusai.TrainingConfigOptions
   abacusai.TwitterSearchResult
   abacusai.Upload
   abacusai.UploadPart
   abacusai.UseCase
   abacusai.UseCaseRequirements
   abacusai.User
   abacusai.UserException
   abacusai.VideoGenSettings
   abacusai.VideoSearchResult
   abacusai.VoiceGenDetails
   abacusai.WebPageResponse
   abacusai.WebSearchResponse
   abacusai.WebSearchResult
   abacusai.Webhook
   abacusai.WorkflowGraphNodeDetails
   abacusai.WorkflowNodeTemplate


Functions
---------

.. autoapisummary::

   abacusai.get_clean_function_source_code_for_agent
   abacusai.validate_constructor_arg_types
   abacusai.validate_input_dict_param
   abacusai.deprecated_enums


Package Contents
----------------

.. py:class:: AbacusApi(client, method=None, docstring=None, score=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An Abacus API.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param method: The name of the API method.
   :type method: str
   :param docstring: The docstring of the API method.
   :type docstring: str
   :param score: The relevance score of the API method.
   :type score: str


   .. py:attribute:: method
      :value: None



   .. py:attribute:: docstring
      :value: None



   .. py:attribute:: score
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict


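   The return classes in this package share a common access pattern: snake_case attributes mirror the camelCase constructor parameters, and ``to_dict()`` yields a plain dictionary. A minimal sketch, assuming ``api_method`` is an ``AbacusApi`` instance already returned by an API call (the printed values are illustrative):

   .. code-block:: python

      # `api_method` is assumed to be an AbacusApi instance returned by the client;
      # these objects are not normally constructed by hand.
      print(api_method.method)      # e.g. "describe_project" (illustrative)
      print(api_method.docstring)   # the API method's docstring
      print(api_method.score)       # relevance score

      # Every return class exposes a plain-dict view of its fields.
      as_dict = api_method.to_dict()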

.. py:class:: Address(client, addressLine1=None, addressLine2=None, city=None, stateOrProvince=None, postalCode=None, country=None, additionalInfo=None, includeReverseCharge=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Address object

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param addressLine1: The first line of the address
   :type addressLine1: str
   :param addressLine2: The second line of the address
   :type addressLine2: str
   :param city: The city
   :type city: str
   :param stateOrProvince: The state or province
   :type stateOrProvince: str
   :param postalCode: The postal code
   :type postalCode: str
   :param country: The country
   :type country: str
   :param additionalInfo: Additional information for invoice
   :type additionalInfo: str
   :param includeReverseCharge: Whether the organization needs the reverse charge mechanism applied to invoices.
   :type includeReverseCharge: bool


   .. py:attribute:: address_line_1
      :value: None



   .. py:attribute:: address_line_2
      :value: None



   .. py:attribute:: city
      :value: None



   .. py:attribute:: state_or_province
      :value: None



   .. py:attribute:: postal_code
      :value: None



   .. py:attribute:: country
      :value: None



   .. py:attribute:: additional_info
      :value: None



   .. py:attribute:: include_reverse_charge
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Agent(client, name=None, agentId=None, createdAt=None, projectId=None, notebookId=None, predictFunctionName=None, sourceCode=None, agentConfig=None, memory=None, trainingRequired=None, agentExecutionConfig=None, codeSource={}, latestAgentVersion={}, draftWorkflowGraph={}, workflowGraph={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An AI agent.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The user-friendly name for the agent.
   :type name: str
   :param agentId: The unique identifier of the agent.
   :type agentId: str
   :param createdAt: Date and time at which the agent was created.
   :type createdAt: str
   :param projectId: The project this agent belongs to.
   :type projectId: str
   :param notebookId: The notebook associated with the agent.
   :type notebookId: str
   :param predictFunctionName: Name of the function found in the source code that will be executed to run predictions through the agent. It is not executed when this function is run.
   :type predictFunctionName: str
   :param sourceCode: Python code used to make the agent.
   :type sourceCode: str
   :param agentConfig: The config options used to create this agent.
   :type agentConfig: dict
   :param memory: Memory in GB specified for the deployment resources for the agent.
   :type memory: int
   :param trainingRequired: Whether training is required to deploy the latest agent code.
   :type trainingRequired: bool
   :param agentExecutionConfig: The config for arguments used to execute the agent.
   :type agentExecutionConfig: dict
   :param latestAgentVersion: The latest agent version.
   :type latestAgentVersion: AgentVersion
   :param codeSource: If a python model, information on the source code
   :type codeSource: CodeSource
   :param draftWorkflowGraph: The saved draft state of the workflow graph for the agent.
   :type draftWorkflowGraph: WorkflowGraph
   :param workflowGraph: The workflow graph for the agent.
   :type workflowGraph: WorkflowGraph


   .. py:attribute:: name
      :value: None



   .. py:attribute:: agent_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: predict_function_name
      :value: None



   .. py:attribute:: source_code
      :value: None



   .. py:attribute:: agent_config
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: training_required
      :value: None



   .. py:attribute:: agent_execution_config
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: latest_agent_version


   .. py:attribute:: draft_workflow_graph


   .. py:attribute:: workflow_graph


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Agent



   .. py:method:: describe()

      Retrieves a full description of the specified agent.

      :param agent_id: Unique string identifier associated with the agent.
      :type agent_id: str

      :returns: Description of the agent.
      :rtype: Agent



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      List all versions of an agent.

      :param limit: If provided, limits the number of agent versions returned.
      :type limit: int
      :param start_after_version: Unique string identifier of the version after which the list starts.
      :type start_after_version: str

      :returns: An array of Agent versions.
      :rtype: list[AgentVersion]



   .. py:method:: copy(project_id = None)

      Creates a copy of the input agent

      :param project_id: Project ID in which to create the new agent. By default it picks up the source agent's project ID.
      :type project_id: str

      :returns: The newly generated agent.
      :rtype: Agent



   .. py:property:: description
      :type: str


      The description of the agent.


   .. py:property:: agent_interface
      :type: str


      The interface that the agent will be deployed with.


   .. py:property:: agent_connectors
      :type: dict


      A dictionary mapping ApplicationConnectorType keys to lists of OAuth scopes. Each key represents a specific application connector, while the value is a list of scopes that define the permissions granted to the application.


   .. py:method:: wait_for_publish(timeout=None)

      A waiting call until the agent is published.

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the agent publishing.

      :returns: A string describing the status of the agent publishing process (pending, complete, etc.).
      :rtype: str



   .. py:method:: republish()

      Re-publishes the Agent and creates a new Agent Version.

      :returns: The new Agent Version.
      :rtype: AgentVersion


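   A short usage sketch of the publish-and-inspect flow built from the methods above. The API key and agent ID are hypothetical, and ``describe_agent`` is assumed to be the client call that returns an ``Agent``:

   .. code-block:: python

      from abacusai import ApiClient

      client = ApiClient(api_key="YOUR_API_KEY")      # hypothetical key
      agent = client.describe_agent("agent_id_123")   # hypothetical ID; assumed client call

      # Block until the current publish finishes, then inspect its status.
      agent.wait_for_publish(timeout=600)
      print(agent.get_status())

      # List previously published versions of this agent.
      for version in agent.list_versions(limit=10):
          print(version.agent_version, version.status)

      # Re-publish the current code as a new AgentVersion.
      new_version = agent.republish()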

.. py:class:: AgentChatMessage(client, role=None, text=None, docIds=None, keywordArguments=None, segments=None, streamedData=None, streamedSectionData=None, agentWorkflowNodeId=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A single chat message with Agent Chat.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param role: The role of the message sender
   :type role: str
   :param text: A list of text segments for the message
   :type text: list[dict]
   :param docIds: A list of IDs of the uploaded documents, if the message has any
   :type docIds: list[str]
   :param keywordArguments: User message only. A dictionary of keyword arguments used to generate response.
   :type keywordArguments: dict
   :param segments: A list of segments for the message
   :type segments: list[dict]
   :param streamedData: The streamed data for the message
   :type streamedData: str
   :param streamedSectionData: A list of streamed section data for the message
   :type streamedSectionData: list
   :param agentWorkflowNodeId: The workflow node name associated with the agent response.
   :type agentWorkflowNodeId: str


   .. py:attribute:: role
      :value: None



   .. py:attribute:: text
      :value: None



   .. py:attribute:: doc_ids
      :value: None



   .. py:attribute:: keyword_arguments
      :value: None



   .. py:attribute:: segments
      :value: None



   .. py:attribute:: streamed_data
      :value: None



   .. py:attribute:: streamed_section_data
      :value: None



   .. py:attribute:: agent_workflow_node_id
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AgentConversation(client, messages={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   List of messages with Agent chat

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param messages: List of messages in the conversation with the agent.
   :type messages: AgentConversationMessage


   .. py:attribute:: messages


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AgentDataDocumentInfo(client, docId=None, filename=None, mimeType=None, size=None, pageCount=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Information for documents uploaded to agents.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param docId: The docstore Document ID of the document.
   :type docId: str
   :param filename: The file name of the uploaded document.
   :type filename: str
   :param mimeType: The mime type of the uploaded document.
   :type mimeType: str
   :param size: The total size of the uploaded document.
   :type size: int
   :param pageCount: The total number of pages in the uploaded document.
   :type pageCount: int


   .. py:attribute:: doc_id
      :value: None



   .. py:attribute:: filename
      :value: None



   .. py:attribute:: mime_type
      :value: None



   .. py:attribute:: size
      :value: None



   .. py:attribute:: page_count
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AgentDataExecutionResult(client, response=None, deploymentConversationId=None, docInfos={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Results of agent execution with uploaded data.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param response: The result of agent conversation execution.
   :type response: str
   :param deploymentConversationId: The unique identifier of the deployment conversation.
   :type deploymentConversationId: id
   :param docInfos: A list of dicts containing information on documents uploaded to the agent.
   :type docInfos: AgentDataDocumentInfo


   .. py:attribute:: response
      :value: None



   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: doc_infos


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict


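   A brief sketch of reading an execution result; ``result`` is assumed to be an ``AgentDataExecutionResult`` returned by an agent execution call:

   .. code-block:: python

      # `result` is assumed to come back from the client; it is not built by hand.
      print(result.response)                    # the agent's textual response
      print(result.deployment_conversation_id)  # conversation the execution belongs to

      for doc in result.doc_infos:              # AgentDataDocumentInfo entries
          print(doc.filename, doc.mime_type, doc.page_count)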

.. py:class:: AgentVersion(client, agentVersion=None, status=None, agentId=None, agentConfig=None, publishingStartedAt=None, publishingCompletedAt=None, pendingDeploymentIds=None, failedDeploymentIds=None, error=None, agentExecutionConfig=None, codeSource={}, workflowGraph={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of an AI agent.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param agentVersion: The unique identifier of an agent version.
   :type agentVersion: str
   :param status: The current status of the model.
   :type status: str
   :param agentId: A reference to the agent this version belongs to.
   :type agentId: str
   :param agentConfig: The config options used to create this agent.
   :type agentConfig: dict
   :param publishingStartedAt: The start time and date of the training process in ISO-8601 format.
   :type publishingStartedAt: str
   :param publishingCompletedAt: The end time and date of the training process in ISO-8601 format.
   :type publishingCompletedAt: str
   :param pendingDeploymentIds: List of deployment IDs where deployment is pending.
   :type pendingDeploymentIds: list
   :param failedDeploymentIds: List of failed deployment IDs.
   :type failedDeploymentIds: list
   :param error: Relevant error if the status is FAILED.
   :type error: str
   :param agentExecutionConfig: The config for arguments used to execute the agent.
   :type agentExecutionConfig: dict
   :param codeSource: If a python model, information on where the source code is located.
   :type codeSource: CodeSource
   :param workflowGraph: The workflow graph for the agent.
   :type workflowGraph: WorkflowGraph


   .. py:attribute:: agent_version
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: agent_id
      :value: None



   .. py:attribute:: agent_config
      :value: None



   .. py:attribute:: publishing_started_at
      :value: None



   .. py:attribute:: publishing_completed_at
      :value: None



   .. py:attribute:: pending_deployment_ids
      :value: None



   .. py:attribute:: failed_deployment_ids
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: agent_execution_config
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: workflow_graph


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: AgentVersion



   .. py:method:: describe()

      Retrieves a full description of the specified agent version.

      :param agent_version: Unique string identifier of the agent version.
      :type agent_version: str

      :returns: An agent version.
      :rtype: AgentVersion



   .. py:method:: wait_for_publish(timeout=None)

      A waiting call until the agent version is published.

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the agent version's publishing.

      :returns: A string describing the publishing status (pending, complete, etc.).
      :rtype: str


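   A brief sketch of polling a specific version obtained from ``Agent.list_versions()`` or ``Agent.republish()``; the status string is illustrative:

   .. code-block:: python

      # `version` is assumed to be an AgentVersion returned by the API.
      version.wait_for_publish(timeout=600)

      if version.get_status() == "FAILED":
          print(version.error)                  # relevant error when publishing failed
      else:
          print(version.pending_deployment_ids, version.failed_deployment_ids)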

.. py:class:: AiBuildingTask(client, task=None, taskType=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A task for Data Science Co-pilot to help build AI.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param task: The task to be performed
   :type task: str
   :param taskType: The type of task
   :type taskType: str


   .. py:attribute:: task
      :value: None



   .. py:attribute:: task_type
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Algorithm(client, name=None, problemType=None, createdAt=None, updatedAt=None, isDefaultEnabled=None, trainingInputMappings=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, configOptions=None, algorithmId=None, useGpu=None, algorithmTrainingConfig=None, onlyOfflineDeployable=None, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Customer created algorithm

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the algorithm
   :type name: str
   :param problemType: The type of the problem this algorithm will work on
   :type problemType: str
   :param createdAt: When the algorithm was created
   :type createdAt: str
   :param updatedAt: When the algorithm was last updated
   :type updatedAt: str
   :param isDefaultEnabled: Whether to train with the algorithm by default
   :type isDefaultEnabled: bool
   :param trainingInputMappings: The mappings for train function parameters' names, e.g. names for training data, name for training config
   :type trainingInputMappings: dict
   :param trainFunctionName: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
   :type trainFunctionName: str
   :param predictFunctionName: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
   :type predictFunctionName: str
   :param predictManyFunctionName: Name of the function found in the source code that will be executed for batch prediction of the model. It is not executed when this function is run.
   :type predictManyFunctionName: str
   :param initializeFunctionName: Name of the function found in the source code that initializes the trained model before it is used to make predictions
   :type initializeFunctionName: str
   :param configOptions: Map dataset types and configs to train function parameter names
   :type configOptions: dict
   :param algorithmId: The unique identifier of the algorithm
   :type algorithmId: str
   :param useGpu: Whether to use a GPU for model training
   :type useGpu: bool
   :param algorithmTrainingConfig: The algorithm specific training config
   :type algorithmTrainingConfig: dict
   :param onlyOfflineDeployable: Whether or not the algorithm is only allowed to be deployed offline
   :type onlyOfflineDeployable: bool
   :param codeSource: Info about the source code of the algorithm
   :type codeSource: CodeSource


   .. py:attribute:: name
      :value: None



   .. py:attribute:: problem_type
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: is_default_enabled
      :value: None



   .. py:attribute:: training_input_mappings
      :value: None



   .. py:attribute:: train_function_name
      :value: None



   .. py:attribute:: predict_function_name
      :value: None



   .. py:attribute:: predict_many_function_name
      :value: None



   .. py:attribute:: initialize_function_name
      :value: None



   .. py:attribute:: config_options
      :value: None



   .. py:attribute:: algorithm_id
      :value: None



   .. py:attribute:: use_gpu
      :value: None



   .. py:attribute:: algorithm_training_config
      :value: None



   .. py:attribute:: only_offline_deployable
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict


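   A minimal sketch of inspecting a custom algorithm object that has already been returned by the API (these objects are not constructed directly):

   .. code-block:: python

      # `algo` is assumed to be an Algorithm instance returned by the client.
      print(algo.name, algo.problem_type)
      print(algo.train_function_name, algo.predict_function_name)
      print(algo.is_default_enabled, algo.use_gpu)

      # Full field dump as a plain dictionary.
      details = algo.to_dict()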

.. py:class:: Annotation(client, annotationType=None, annotationValue=None, comments=None, metadata=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An Annotation Store Annotation

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param annotationType: A name determining the type of annotation and how to interpret the annotation value data, e.g. as a label, bounding box, etc.
   :type annotationType: str
   :param annotationValue: JSON-compatible value of the annotation. The format of the value is determined by the annotation type.
   :type annotationValue: dict
   :param comments: Comments about the annotation. This is a dictionary of feature name to the corresponding comment.
   :type comments: dict
   :param metadata: Metadata about the annotation.
   :type metadata: dict


   .. py:attribute:: annotation_type
      :value: None



   .. py:attribute:: annotation_value
      :value: None



   .. py:attribute:: comments
      :value: None



   .. py:attribute:: metadata
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AnnotationConfig(client, featureAnnotationConfigs=None, labels=None, statusFeature=None, commentsFeatures=None, metadataFeature=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Annotation config for a feature group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureAnnotationConfigs: List of feature annotation configs
   :type featureAnnotationConfigs: list
   :param labels: List of labels
   :type labels: list
   :param statusFeature: Name of the feature that contains the status of the annotation (Optional)
   :type statusFeature: str
   :param commentsFeatures: Features that contain comments for the annotation (Optional)
   :type commentsFeatures: list
   :param metadataFeature: Name of the feature that contains the metadata for the annotation (Optional)
   :type metadataFeature: str


   .. py:attribute:: feature_annotation_configs
      :value: None



   .. py:attribute:: labels
      :value: None



   .. py:attribute:: status_feature
      :value: None



   .. py:attribute:: comments_features
      :value: None



   .. py:attribute:: metadata_feature
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AnnotationDocument(client, docId=None, featureGroupRowIdentifier=None, featureGroupRowIndex=None, totalRows=None, isAnnotationPresent=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Document to be annotated.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param docId: The docstore Document ID of the document.
   :type docId: str
   :param featureGroupRowIdentifier: The key value of the feature group row the annotation is on. Usually the primary key value.
   :type featureGroupRowIdentifier: str
   :param featureGroupRowIndex: The index of the document row in the feature group.
   :type featureGroupRowIndex: int
   :param totalRows: The total number of rows in the feature group.
   :type totalRows: int
   :param isAnnotationPresent: Whether the document already has an annotation. Returns None if feature group is not under annotations review mode.
   :type isAnnotationPresent: bool


   .. py:attribute:: doc_id
      :value: None



   .. py:attribute:: feature_group_row_identifier
      :value: None



   .. py:attribute:: feature_group_row_index
      :value: None



   .. py:attribute:: total_rows
      :value: None



   .. py:attribute:: is_annotation_present
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AnnotationEntry(client, featureGroupId=None, featureName=None, docId=None, featureGroupRowIdentifier=None, updatedAt=None, annotationEntryMarker=None, status=None, lockedUntil=None, verificationInfo=None, annotation={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An Annotation Store entry for an Annotation

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: The ID of the feature group this annotation belongs to.
   :type featureGroupId: str
   :param featureName: Name of the feature this annotation is on.
   :type featureName: str
   :param docId: The ID of the primary document the annotation is on.
   :type docId: str
   :param featureGroupRowIdentifier: The key value of the feature group row the annotation is on (cast to string). Usually the primary key value.
   :type featureGroupRowIdentifier: str
   :param updatedAt: Most recent time the annotation entry was modified, e.g. creation or update time.
   :type updatedAt: str
   :param annotationEntryMarker: The entry marker for the annotation.
   :type annotationEntryMarker: str
   :param status: The status of labeling the document.
   :type status: str
   :param lockedUntil: The time until which the document is locked for editing, in ISO-8601 format.
   :type lockedUntil: str
   :param verificationInfo: The verification info for the annotation.
   :type verificationInfo: dict
   :param annotation: JSON-compatible structure holding the type and value of the annotation.
   :type annotation: Annotation


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: feature_name
      :value: None



   .. py:attribute:: doc_id
      :value: None



   .. py:attribute:: feature_group_row_identifier
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: annotation_entry_marker
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: locked_until
      :value: None



   .. py:attribute:: verification_info
      :value: None



   .. py:attribute:: annotation


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AnnotationsStatus(client, total=None, done=None, inProgress=None, todo=None, latestUpdatedAt=None, isMaterializationNeeded=None, latestMaterializedAnnotationConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The status of annotations for a feature group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param total: The total number of documents
   :type total: int
   :param done: The number of documents annotated
   :type done: int
   :param inProgress: The number of documents currently being annotated
   :type inProgress: int
   :param todo: The number of documents that need to be annotated
   :type todo: int
   :param latestUpdatedAt: The latest time an annotation was updated (ISO-8601 format)
   :type latestUpdatedAt: str
   :param isMaterializationNeeded: Whether feature group needs to be materialized before using for annotations
   :type isMaterializationNeeded: bool
   :param latestMaterializedAnnotationConfig: The annotation config corresponding to the latest materialized feature group
   :type latestMaterializedAnnotationConfig: AnnotationConfig


   .. py:attribute:: total
      :value: None



   .. py:attribute:: done
      :value: None



   .. py:attribute:: in_progress
      :value: None



   .. py:attribute:: todo
      :value: None



   .. py:attribute:: latest_updated_at
      :value: None



   .. py:attribute:: is_materialization_needed
      :value: None



   .. py:attribute:: latest_materialized_annotation_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ApiClass

   Bases: :py:obj:`abc.ABC`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: _upper_snake_case_keys
      :type:  bool
      :value: False



   .. py:attribute:: _support_kwargs
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


   .. py:method:: _get_builder()
      :classmethod:



   .. py:method:: __str__()


   .. py:method:: _repr_html_()


   .. py:method:: __getitem__(item)


   .. py:method:: __setitem__(item, value)


   .. py:method:: _unset_item(item)


   .. py:method:: get(item, default = None)


   .. py:method:: pop(item, default = NotImplemented)


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(input_dict)
      :classmethod:



.. py:function:: get_clean_function_source_code_for_agent(func)

.. py:function:: validate_constructor_arg_types(friendly_class_name=None)

.. py:function:: validate_input_dict_param(dict_object, friendly_class_name, must_contain=[])

.. py:class:: FieldDescriptor

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Configs for vector store indexing.

   :param field: The field to be extracted. This will be used as the key in the response.
   :type field: str
   :param description: The description of this field. If not included, the response_field will be used.
   :type description: str
   :param example_extraction: An example of this extracted field.
    :type example_extraction: Union[str, int, bool, float, list, dict]
   :param type: The type of this field. If not provided, the default type is STRING.
   :type type: FieldDescriptorType
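
   A minimal construction sketch with illustrative values (the field name and description below are hypothetical; ``FieldDescriptorType.STRING`` is assumed to be a valid member, as implied by the default noted above):

   .. code-block:: python

      from abacusai import FieldDescriptor
      from abacusai.api_class.enums import FieldDescriptorType

      # Hypothetical extraction field, for illustration only.
      invoice_total = FieldDescriptor(
          field='invoice_total',
          description='Total amount due on the invoice',
          example_extraction='1024.50',
          type=FieldDescriptorType.STRING,
      )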


   .. py:attribute:: field
      :type:  str


   .. py:attribute:: description
      :type:  str
      :value: None



   .. py:attribute:: example_extraction
      :type:  Union[str, int, bool, float, list, dict]
      :value: None



   .. py:attribute:: type
      :type:  abacusai.api_class.enums.FieldDescriptorType


.. py:class:: JSONSchema

   .. py:method:: from_fields_list(fields_list)
      :classmethod:



   .. py:method:: to_fields_list(json_schema)
      :classmethod:



.. py:class:: WorkflowNodeInputMapping

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents a mapping of inputs to a workflow node.

   :param name: The name of the input variable of the node function.
   :type name: str
   :param variable_type: The type of the input. If the type is `IGNORE`, the input will be ignored.
   :type variable_type: Union[WorkflowNodeInputType, str]
   :param variable_source: The name of the node this variable is sourced from.
                           If the type is `WORKFLOW_VARIABLE`, the value given by the source node will be directly used.
                           If the type is `USER_INPUT`, the value given by the source node will be used as the default initial value before the user edits it.
                           Set to `None` if the type is `USER_INPUT` and the variable doesn't need a pre-filled initial value.
   :type variable_source: str
   :param is_required: Indicates whether the input is required. Defaults to True.
   :type is_required: bool
   :param description: The description of this input.
   :type description: str
   :param constant_value: The constant value of this input if variable type is CONSTANT. Only applicable for template nodes.
   :type constant_value: str
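
   A short sketch showing one user-supplied input and one input sourced from an upstream node; the node and variable names are illustrative:

   .. code-block:: python

      from abacusai import WorkflowNodeInputMapping
      from abacusai.api_class.enums import WorkflowNodeInputType

      # Value the end user types in at run time.
      query_input = WorkflowNodeInputMapping(
          name='query',
          variable_type=WorkflowNodeInputType.USER_INPUT,
          is_required=True,
          description='Free-text question from the user',
      )

      # Value produced by an upstream node named 'fetch_documents'.
      documents_input = WorkflowNodeInputMapping(
          name='documents',
          variable_type=WorkflowNodeInputType.WORKFLOW_VARIABLE,
          variable_source='fetch_documents',
      )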


   .. py:attribute:: name
      :type:  str


   .. py:attribute:: variable_type
      :type:  abacusai.api_class.enums.WorkflowNodeInputType


   .. py:attribute:: variable_source
      :type:  str
      :value: None



   .. py:attribute:: source_prop
      :type:  str
      :value: None



   .. py:attribute:: is_required
      :type:  bool
      :value: True



   .. py:attribute:: description
      :type:  str
      :value: None



   .. py:attribute:: constant_value
      :type:  str
      :value: None



   .. py:method:: __post_init__()


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(mapping)
      :classmethod:



.. py:class:: WorkflowNodeInputSchema

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`, :py:obj:`JSONSchema`


   A schema conformant to react-jsonschema-form for workflow node input.

   To initialize a WorkflowNodeInputSchema dependent on another node's output, use the from_workflow_node method.

   :param json_schema: The JSON schema for the input, conformant to react-jsonschema-form specification. Must define keys like "title", "type", and "properties". Supported elements include Checkbox, Radio Button, Dropdown, Textarea, Number, Date, and file upload. Nested elements, arrays, and other complex types are not supported.
   :type json_schema: dict
   :param ui_schema: The UI schema for the input, conformant to react-jsonschema-form specification.
   :type ui_schema: dict
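
   A minimal sketch of a schema with a single text field, following the react-jsonschema-form conventions described above (field names are illustrative):

   .. code-block:: python

      from abacusai import WorkflowNodeInputSchema

      input_schema = WorkflowNodeInputSchema(
          json_schema={
              'title': 'Ticket details',
              'type': 'object',
              'properties': {
                  'summary': {'type': 'string', 'title': 'Summary'},
              },
          },
          ui_schema={'summary': {'ui:widget': 'textarea'}},
      )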


   .. py:attribute:: json_schema
      :type:  dict


   .. py:attribute:: ui_schema
      :type:  dict


   .. py:attribute:: schema_source
      :type:  str
      :value: None



   .. py:attribute:: schema_prop
      :type:  str
      :value: None



   .. py:attribute:: runtime_schema
      :type:  bool
      :value: False



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(schema)
      :classmethod:



   .. py:method:: from_workflow_node(schema_source, schema_prop)
      :classmethod:


      Creates a WorkflowNodeInputSchema instance which references the schema generated by a WorkflowGraphNode.

      :param schema_source: The name of the source WorkflowGraphNode.
      :type schema_source: str
      :param schema_prop: The name of the input schema parameter which source node outputs.
      :type schema_prop: str



   .. py:method:: from_input_mappings(input_mappings)
      :classmethod:


      Creates a json_schema for the input schema of the node from its input mappings.

      :param input_mappings: The input mappings for the node.
      :type input_mappings: List[WorkflowNodeInputMapping]



.. py:class:: WorkflowNodeOutputMapping

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents a mapping of output from a workflow node.

   :param name: The name of the output.
   :type name: str
   :param variable_type: The type of the output in the form of an enum or a string.
   :type variable_type: Union[WorkflowNodeOutputType, str]
   :param description: The description of this output.
   :type description: str


   .. py:attribute:: name
      :type:  str


   .. py:attribute:: variable_type
      :type:  Union[abacusai.api_class.enums.WorkflowNodeOutputType, str]


   .. py:attribute:: description
      :type:  str
      :value: None



   .. py:method:: __post_init__()


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(mapping)
      :classmethod:



.. py:class:: WorkflowNodeOutputSchema

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`, :py:obj:`JSONSchema`


   A schema conformant to react-jsonschema-form for a workflow node output.

   :param json_schema: The JSON schema for the output, conformant to react-jsonschema-form specification.
   :type json_schema: dict


   .. py:attribute:: json_schema
      :type:  dict


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(schema)
      :classmethod:



.. py:class:: TriggerConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents the configuration for a trigger workflow node.

   :param sleep_time: The time in seconds to wait before the node gets executed again.
   :type sleep_time: int


   .. py:attribute:: sleep_time
      :type:  int
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(configs)
      :classmethod:



.. py:class:: WorkflowGraphNode(name, function = None, input_mappings = None, output_mappings = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents a node in an Agent workflow graph.

   :param name: A unique name for the workflow node.
   :type name: str
   :param input_mappings: List of input mappings for the node. Each arg/kwarg of the node function should have a corresponding input mapping.
   :type input_mappings: List[WorkflowNodeInputMapping]
   :param output_mappings: List of outputs for the node. Each field in the returned dict/AgentResponse must have a corresponding output in the list.
   :type output_mappings: List[str]
   :param function: The callable node function reference.
   :type function: callable
   :param input_schema: The react json schema for the user input variables.
   :type input_schema: WorkflowNodeInputSchema
   :param output_schema: The list of outputs to be shown on UI. Each output corresponds to a field in the output mappings of the node.
   :type output_schema: List[str]

   Additional Attributes:
       function_name (str): The name of the function.
       source_code (str): The source code of the function.
       trigger_config (TriggerConfig): The configuration for a trigger workflow node.
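
   A minimal sketch of a node built from a plain Python function; the function, input mapping, and output name are illustrative, and the output mapping is given as a plain string as allowed above:

   .. code-block:: python

      from abacusai import WorkflowGraphNode, WorkflowNodeInputMapping
      from abacusai.api_class.enums import WorkflowNodeInputType

      def summarize(text):
          # Placeholder node function used only for illustration.
          return {'summary': text[:100]}

      summarize_node = WorkflowGraphNode(
          name='summarize',
          function=summarize,
          input_mappings=[
              WorkflowNodeInputMapping(
                  name='text',
                  variable_type=WorkflowNodeInputType.USER_INPUT,
              ),
          ],
          output_mappings=['summary'],
      )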


   .. py:attribute:: template_metadata
      :value: None



   .. py:attribute:: trigger_config
      :value: None



   .. py:method:: _raw_init(name, input_mappings = None, output_mappings = None, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)
      :classmethod:



   .. py:method:: from_template(template_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None, sleep_time = None)
      :classmethod:



   .. py:method:: from_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
      :classmethod:



   .. py:method:: from_system_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
      :classmethod:



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: is_template_node()


   .. py:method:: is_trigger_node()


   .. py:method:: from_dict(node)
      :classmethod:



   .. py:method:: __setattr__(name, value)


   .. py:method:: __getattribute__(name)


   .. py:class:: Outputs(node)

      .. py:attribute:: node


      .. py:method:: __getattr__(name)



   .. py:property:: outputs


.. py:class:: WorkflowGraphEdge(source, target, details = None)

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents an edge in an Agent workflow graph.

   To make an edge conditional, provide an {'EXECUTION_CONDITION': '<condition>'} key-value pair in the details dictionary.
   The condition should be a Pythonic expression string that evaluates to a boolean value and only depends on the outputs of the source node of the edge.

   :param source: The name of the source node of the edge.
   :type source: str
   :param target: The name of the target node of the edge.
   :type target: str
   :param details: Additional details about the edge. Like the condition for edge execution.
   :type details: dict
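
   A small sketch of a conditional edge between two hypothetical nodes, using the ``EXECUTION_CONDITION`` key described above:

   .. code-block:: python

      from abacusai import WorkflowGraphEdge

      # Run 'notify' after 'summarize' only when the summary is non-empty.
      edge = WorkflowGraphEdge(
          source='summarize',
          target='notify',
          details={'EXECUTION_CONDITION': "summary != ''"},
      )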


   .. py:attribute:: source
      :type:  Union[str, WorkflowGraphNode]


   .. py:attribute:: target
      :type:  Union[str, WorkflowGraphNode]


   .. py:attribute:: details
      :type:  dict


   .. py:method:: to_nx_edge()


.. py:class:: WorkflowGraph

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents an Agent workflow graph.

   :param nodes: A list of nodes in the workflow graph.
   :type nodes: List[WorkflowGraphNode]
   :param primary_start_node: The primary node to start the workflow from.
   :type primary_start_node: Union[str, WorkflowGraphNode]
   :param common_source_code: Common source code that can be used across all nodes.
   :type common_source_code: str
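
   A compact two-node sketch tying the pieces above together; the node functions, names, and mappings are illustrative:

   .. code-block:: python

      from abacusai import (
          WorkflowGraph,
          WorkflowGraphEdge,
          WorkflowGraphNode,
          WorkflowNodeInputMapping,
      )
      from abacusai.api_class.enums import WorkflowNodeInputType

      def summarize(text):
          # Illustrative node function.
          return {'summary': text[:100]}

      def notify(summary):
          # Illustrative node function.
          return {'status': 'sent'}

      graph = WorkflowGraph(
          nodes=[
              WorkflowGraphNode(
                  name='summarize',
                  function=summarize,
                  input_mappings=[WorkflowNodeInputMapping(
                      name='text',
                      variable_type=WorkflowNodeInputType.USER_INPUT)],
                  output_mappings=['summary'],
              ),
              WorkflowGraphNode(
                  name='notify',
                  function=notify,
                  input_mappings=[WorkflowNodeInputMapping(
                      name='summary',
                      variable_type=WorkflowNodeInputType.WORKFLOW_VARIABLE,
                      variable_source='summarize')],
                  output_mappings=['status'],
              ),
          ],
          edges=[WorkflowGraphEdge(source='summarize', target='notify')],
          primary_start_node='summarize',
      )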


   .. py:attribute:: nodes
      :type:  List[WorkflowGraphNode]
      :value: []



   .. py:attribute:: edges
      :type:  List[Union[WorkflowGraphEdge, Tuple[WorkflowGraphNode, WorkflowGraphNode, dict], Tuple[str, str, dict]]]
      :value: []



   .. py:attribute:: primary_start_node
      :type:  Union[str, WorkflowGraphNode]
      :value: None



   .. py:attribute:: common_source_code
      :type:  str
      :value: None



   .. py:attribute:: specification_type
      :type:  str
      :value: 'data_flow'



   .. py:method:: __post_init__()


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(graph)
      :classmethod:



.. py:class:: AgentConversationMessage

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Message format for agent conversation

   :param is_user: Whether the message is from the user.
   :type is_user: bool
   :param text: The message's text.
   :type text: str
   :param document_contents: Dict of document name to document text in case of any document present.
   :type document_contents: dict
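
   A minimal construction sketch (message text and document contents are illustrative):

   .. code-block:: python

      from abacusai import AgentConversationMessage

      message = AgentConversationMessage(
          is_user=True,
          text='Summarize the attached report.',
          document_contents={'report.txt': 'Q3 revenue grew 12 percent ...'},
      )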


   .. py:attribute:: is_user
      :type:  bool
      :value: None



   .. py:attribute:: text
      :type:  str
      :value: None



   .. py:attribute:: document_contents
      :type:  dict
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



.. py:class:: WorkflowNodeTemplateConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents a WorkflowNode template config.

   :param name: A unique name of the config.
   :type name: str
   :param description: The description of this config.
   :type description: str
   :param default_value: Default value of the config to be used if value is not provided during node initialization.
   :type default_value: str
   :param is_required: Indicates whether the config is required. Defaults to False.
   :type is_required: bool


   .. py:attribute:: name
      :type:  str


   .. py:attribute:: description
      :type:  str
      :value: None



   .. py:attribute:: default_value
      :type:  str
      :value: None



   .. py:attribute:: is_required
      :type:  bool
      :value: False



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(mapping)
      :classmethod:



.. py:class:: WorkflowNodeTemplateInput

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents an input to a workflow node generated from a template.

   :param name: A unique name of the input.
   :type name: str
   :param is_required: Indicates whether the input is required. Defaults to False.
   :type is_required: bool
   :param description: The description of this input.
   :type description: str


   .. py:attribute:: name
      :type:  str


   .. py:attribute:: is_required
      :type:  bool
      :value: False



   .. py:attribute:: description
      :type:  str
      :value: ''



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(mapping)
      :classmethod:



.. py:class:: WorkflowNodeTemplateOutput

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Represents an output returned by a workflow node generated from a template.

   :param name: The name of the output.
   :type name: str
   :param variable_type: The type of the output.
   :type variable_type: WorkflowNodeOutputType
   :param description: The description of this output.
   :type description: str


   .. py:attribute:: name
      :type:  str


   .. py:attribute:: variable_type
      :type:  abacusai.api_class.enums.WorkflowNodeOutputType


   .. py:attribute:: description
      :type:  str
      :value: ''



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to a dictionary.
      Keys of the response dictionary are converted to camel case.
      This also validates the fields (type, value, etc.) received in the dictionary.



   .. py:method:: from_dict(mapping)
      :classmethod:



.. py:class:: HotkeyPrompt

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   A config class for a Data Science Co-Pilot Hotkey

   :param prompt: The prompt to send to Data Science Co-Pilot
   :type prompt: str
   :param title: A short, descriptive title for the prompt. If not provided, one will be automatically generated.
   :type title: str
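
   A small sketch of a hotkey config (prompt and title are illustrative):

   .. code-block:: python

      from abacusai import HotkeyPrompt

      hotkey = HotkeyPrompt(
          prompt='Profile the dataset and list columns with missing values.',
          title='Missing value check',
      )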


   .. py:attribute:: prompt
      :type:  str


   .. py:attribute:: title
      :type:  str
      :value: None



   .. py:attribute:: disable_problem_type_context
      :type:  bool
      :value: True



   .. py:attribute:: ignore_history
      :type:  bool
      :value: None



.. py:class:: _ApiClassFactory

   Bases: :py:obj:`abc.ABC`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class
      :value: None



   .. py:attribute:: config_class_key
      :value: None



   .. py:attribute:: config_class_map


   .. py:method:: from_dict(config)
      :classmethod:



.. py:class:: BatchPredictionArgs

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for Batch Prediction args specific to problem type.


   .. py:attribute:: _support_kwargs
      :type:  bool
      :value: True



   .. py:attribute:: kwargs
      :type:  dict


   .. py:attribute:: problem_type
      :type:  abacusai.api_class.enums.ProblemType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: ForecastingBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the FORECASTING problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool
   :param predictions_start_date: The start date for predictions. Accepts timestamp integers and strings in many standard formats such as YYYY-MM-DD, YYYY-MM-DD HH:MM:SS, or YYYY-MM-DDTHH:MM:SS. If not specified, the prediction start date will be automatically defined.
   :type predictions_start_date: str
   :param use_prediction_offset: If True, use prediction offset.
   :type use_prediction_offset: bool
   :param start_date_offset: Sets prediction start date as this offset relative to the prediction start date.
   :type start_date_offset: int
   :param forecasting_horizon: The number of timestamps to predict in the future. Range: [1, 1000].
   :type forecasting_horizon: int
   :param item_attributes_to_include_in_the_result: List of columns to include in the prediction output.
   :type item_attributes_to_include_in_the_result: list
   :param explain_predictions: If True, calculates explanations for the forecasted values along with predictions.
   :type explain_predictions: bool
   :param create_monitor: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
   :type create_monitor: bool
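
   A sketch of a typical configuration; the dates, horizon, and column name are illustrative, and such an object would normally be supplied when creating or updating a forecasting batch prediction:

   .. code-block:: python

      from abacusai import ForecastingBatchPredictionArgs

      args = ForecastingBatchPredictionArgs(
          for_eval=False,
          predictions_start_date='2024-01-01',
          forecasting_horizon=30,
          item_attributes_to_include_in_the_result=['item_name'],  # hypothetical column
          explain_predictions=False,
          create_monitor=True,
      )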


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:attribute:: predictions_start_date
      :type:  str
      :value: None



   .. py:attribute:: use_prediction_offset
      :type:  bool
      :value: None



   .. py:attribute:: start_date_offset
      :type:  int
      :value: None



   .. py:attribute:: forecasting_horizon
      :type:  int
      :value: None



   .. py:attribute:: item_attributes_to_include_in_the_result
      :type:  list
      :value: None



   .. py:attribute:: explain_predictions
      :type:  bool
      :value: None



   .. py:attribute:: create_monitor
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: NamedEntityExtractionBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the NAMED_ENTITY_EXTRACTION problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: PersonalizationBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the PERSONALIZATION problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool
   :param number_of_items: Number of items to recommend.
   :type number_of_items: int
   :param item_attributes_to_include_in_the_result: List of columns to include in the prediction output.
   :type item_attributes_to_include_in_the_result: list
   :param score_field: If specified, relative item scores will be returned using a field with this name
   :type score_field: str


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:attribute:: number_of_items
      :type:  int
      :value: None



   .. py:attribute:: item_attributes_to_include_in_the_result
      :type:  list
      :value: None



   .. py:attribute:: score_field
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: PredictiveModelingBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the PREDICTIVE_MODELING problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool
   :param explainer_type: The type of explainer to use to generate explanations on the batch prediction.
   :type explainer_type: enums.ExplainerType
   :param number_of_samples_to_use_for_explainer: Number Of Samples To Use For Kernel Explainer.
   :type number_of_samples_to_use_for_explainer: int
   :param include_multi_class_explanations: If True, Includes explanations for all classes in multi-class classification.
   :type include_multi_class_explanations: bool
   :param features_considered_constant_for_explanations: Comma-separated list of fields to treat as constant in SHAP explanations.
   :type features_considered_constant_for_explanations: str
   :param importance_of_records_in_nested_columns: Returns importance of each index in the specified nested column instead of SHAP column explanations.
   :type importance_of_records_in_nested_columns: str
   :param explanation_filter_lower_bound: If set, explanations will be limited to predictions above this value. Range: [0, 1].
   :type explanation_filter_lower_bound: float
   :param explanation_filter_upper_bound: If set, explanations will be limited to predictions below this value. Range: [0, 1].
   :type explanation_filter_upper_bound: float
   :param explanation_filter_label: For classification problems specifies the label to which the explanation bounds are applied.
   :type explanation_filter_label: str
   :param output_columns: A list of column names to include in the prediction result.
   :type output_columns: list
   :param explain_predictions: If True, calculates explanations for the predicted values along with predictions.
   :type explain_predictions: bool
   :param create_monitor: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
   :type create_monitor: bool


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:attribute:: explainer_type
      :type:  abacusai.api_class.enums.ExplainerType
      :value: None



   .. py:attribute:: number_of_samples_to_use_for_explainer
      :type:  int
      :value: None



   .. py:attribute:: include_multi_class_explanations
      :type:  bool
      :value: None



   .. py:attribute:: features_considered_constant_for_explanations
      :type:  str
      :value: None



   .. py:attribute:: importance_of_records_in_nested_columns
      :type:  str
      :value: None



   .. py:attribute:: explanation_filter_lower_bound
      :type:  float
      :value: None



   .. py:attribute:: explanation_filter_upper_bound
      :type:  float
      :value: None



   .. py:attribute:: explanation_filter_label
      :type:  str
      :value: None



   .. py:attribute:: output_columns
      :type:  list
      :value: None



   .. py:attribute:: explain_predictions
      :type:  bool
      :value: None



   .. py:attribute:: create_monitor
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: PretrainedModelsBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the PRETRAINED_MODELS problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool
   :param files_output_location_prefix: The output location prefix for the files.
   :type files_output_location_prefix: str
   :param channel_id_to_label_map: JSON string for the map from channel ids to their labels.
   :type channel_id_to_label_map: str


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:attribute:: files_output_location_prefix
      :type:  str
      :value: None



   .. py:attribute:: channel_id_to_label_map
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: SentenceBoundaryDetectionBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the SENTENCE_BOUNDARY_DETECTION problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool
   :param explode_output: Explode data so there is one sentence per row.
   :type explode_output: bool


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:attribute:: explode_output
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ThemeAnalysisBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the THEME_ANALYSIS problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool
   :param analysis_frequency: The length of each analysis interval.
   :type analysis_frequency: str
   :param start_date: The end point for predictions.
   :type start_date: str
   :param analysis_days: How many days to analyze.
   :type analysis_days: int


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:attribute:: analysis_frequency
      :type:  str
      :value: None



   .. py:attribute:: start_date
      :type:  str
      :value: None



   .. py:attribute:: analysis_days
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ChatLLMBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the ChatLLM problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: TrainablePlugAndPlayBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the TrainablePlugAndPlay problem type

   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
   :type for_eval: bool
   :param create_monitor: Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
   :type create_monitor: bool


   .. py:attribute:: for_eval
      :type:  bool
      :value: None



   .. py:attribute:: create_monitor
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: AIAgentBatchPredictionArgs

   Bases: :py:obj:`BatchPredictionArgs`


   Batch Prediction Config for the AIAgents problem type


   .. py:method:: __post_init__()


.. py:class:: _BatchPredictionArgsFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'problem_type'



   .. py:attribute:: config_class_map


.. py:class:: Blob(contents, mime_type = None, filename = None, size = None)

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An object for storing and passing file data.
   In AI Agents, if a function accepts file upload as an argument, the uploaded file is passed as a Blob object. If a function returns a Blob object, it will be rendered as a file download.

   :param contents: The binary contents of the blob.
   :type contents: bytes
   :param mime_type: The mime type of the blob.
   :type mime_type: str
   :param filename: The original filename of the blob.
   :type filename: str
   :param size: The size of the blob in bytes.
   :type size: int
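
   Two short usage sketches based on the class methods below; the file path and contents are illustrative:

   .. code-block:: python

      from abacusai import Blob

      # Read an existing file from disk (path is hypothetical).
      report = Blob.from_local_file('/tmp/report.pdf')

      # Wrap in-memory bytes as a downloadable file.
      note = Blob.from_contents(
          b'hello world',
          filename='note.txt',
          mime_type='text/plain',
      )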


   .. py:attribute:: filename
      :type:  str


   .. py:attribute:: contents
      :type:  bytes


   .. py:attribute:: mime_type
      :type:  str


   .. py:attribute:: size
      :type:  int


   .. py:method:: from_local_file(file_path)
      :classmethod:



   .. py:method:: from_contents(contents, filename = None, mime_type = None)
      :classmethod:



.. py:class:: BlobInput(filename = None, contents = None, mime_type = None, size = None)

   Bases: :py:obj:`Blob`


   An object for storing and passing file data.
   In AI Agents, if a function accepts file upload as an argument, the uploaded file is passed as a BlobInput object.

   :param filename: The original filename of the blob.
   :type filename: str
   :param contents: The binary contents of the blob.
   :type contents: bytes
   :param mime_type: The mime type of the blob.
   :type mime_type: str
   :param size: The size of the blob in bytes.
   :type size: int


.. py:class:: DatasetConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for dataset configs

   :param is_documentset: Whether the dataset is a document set
   :type is_documentset: bool


   .. py:attribute:: is_documentset
      :type:  bool
      :value: None



.. py:class:: StreamingConnectorDatasetConfig

   Bases: :py:obj:`abacusai.api_class.dataset.DatasetConfig`


   An abstract class for dataset configs specific to streaming connectors.

   :param streaming_connector_type: The type of streaming connector
   :type streaming_connector_type: StreamingConnectorType


   .. py:attribute:: streaming_connector_type
      :type:  abacusai.api_class.enums.StreamingConnectorType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: KafkaDatasetConfig

   Bases: :py:obj:`StreamingConnectorDatasetConfig`


   Dataset config for Kafka Streaming Connector

   :param topic: The kafka topic to consume
   :type topic: str


   .. py:attribute:: topic
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: _StreamingConnectorDatasetConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'streaming_connector_type'



   .. py:attribute:: config_class_map


.. py:class:: DocumentType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: SIMPLE_TEXT
      :value: 'SIMPLE_TEXT'



   .. py:attribute:: TEXT
      :value: 'TEXT'



   .. py:attribute:: TABLES_AND_FORMS
      :value: 'TABLES_AND_FORMS'



   .. py:attribute:: EMBEDDED_IMAGES
      :value: 'EMBEDDED_IMAGES'



   .. py:attribute:: SCANNED_TEXT
      :value: 'SCANNED_TEXT'



   .. py:attribute:: COMPREHENSIVE_MARKDOWN
      :value: 'COMPREHENSIVE_MARKDOWN'



   .. py:method:: is_ocr_forced(document_type)
      :classmethod:



.. py:class:: OcrMode

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AUTO
      :value: 'AUTO'



   .. py:attribute:: DEFAULT
      :value: 'DEFAULT'



   .. py:attribute:: LAYOUT
      :value: 'LAYOUT'



   .. py:attribute:: SCANNED
      :value: 'SCANNED'



   .. py:attribute:: COMPREHENSIVE
      :value: 'COMPREHENSIVE'



   .. py:attribute:: COMPREHENSIVE_V2
      :value: 'COMPREHENSIVE_V2'



   .. py:attribute:: COMPREHENSIVE_TABLE_MD
      :value: 'COMPREHENSIVE_TABLE_MD'



   .. py:attribute:: COMPREHENSIVE_FORM_MD
      :value: 'COMPREHENSIVE_FORM_MD'



   .. py:attribute:: COMPREHENSIVE_FORM_AND_TABLE_MD
      :value: 'COMPREHENSIVE_FORM_AND_TABLE_MD'



   .. py:attribute:: TESSERACT_FAST
      :value: 'TESSERACT_FAST'



   .. py:attribute:: LLM
      :value: 'LLM'



   .. py:attribute:: AUGMENTED_LLM
      :value: 'AUGMENTED_LLM'



   .. py:method:: aws_ocr_modes()
      :classmethod:



.. py:class:: ParsingConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Custom config for dataset parsing.

   :param escape: Escape character for CSV files. Defaults to '"'.
   :type escape: str
   :param csv_delimiter: Delimiter for CSV files. Defaults to None.
   :type csv_delimiter: str
   :param file_path_with_schema: Path to the file with schema. Defaults to None.
   :type file_path_with_schema: str
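
   A small sketch for a tab-separated file, leaving the escape character at its default:

   .. code-block:: python

      from abacusai import ParsingConfig

      parsing_config = ParsingConfig(csv_delimiter='\t')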


   .. py:attribute:: escape
      :type:  str
      :value: '"'



   .. py:attribute:: csv_delimiter
      :type:  str
      :value: None



   .. py:attribute:: file_path_with_schema
      :type:  str
      :value: None



.. py:class:: DocumentProcessingConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Document processing configuration.

   :param document_type: Type of document. Can be one of Text, Tables and Forms, Embedded Images, etc. If not specified, type will be decided automatically.
   :type document_type: DocumentType
   :param highlight_relevant_text: Whether to extract bounding boxes and highlight relevant text in search results. Defaults to False.
   :type highlight_relevant_text: bool
   :param extract_bounding_boxes: Whether to perform OCR and extract bounding boxes. If False, no OCR will be done but only the embedded text from digital documents will be extracted. Defaults to False.
   :type extract_bounding_boxes: bool
   :param ocr_mode: OCR mode. There are different OCR modes available for different kinds of documents and use cases. This option only takes effect when extract_bounding_boxes is True.
   :type ocr_mode: OcrMode
   :param use_full_ocr: Whether to perform full OCR. If True, OCR will be performed on the full page. If False, OCR will be performed on the non-text regions only. By default, it will be decided automatically based on the OCR mode and the document type. This option only takes effect when extract_bounding_boxes is True.
   :type use_full_ocr: bool
   :param remove_header_footer: Whether to remove headers and footers. Defaults to False. This option only takes effect when extract_bounding_boxes is True.
   :type remove_header_footer: bool
   :param remove_watermarks: Whether to remove watermarks. By default, it will be decided automatically based on the OCR mode and the document type. This option only takes effect when extract_bounding_boxes is True.
   :type remove_watermarks: bool
   :param convert_to_markdown: Whether to convert extracted text to markdown. Defaults to False. This option only takes effect when extract_bounding_boxes is True.
   :type convert_to_markdown: bool
   :param mask_pii: Whether to mask personally identifiable information (PII) in the document text/tokens. Defaults to False.
   :type mask_pii: bool
   :param extract_images: Whether to extract images from the document e.g. diagrams in a PDF page. Defaults to False.
   :type extract_images: bool
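
   A sketch of a configuration that runs OCR with bounding boxes and markdown conversion; the chosen document type and OCR mode are illustrative:

   .. code-block:: python

      from abacusai import DocumentProcessingConfig, DocumentType, OcrMode

      doc_config = DocumentProcessingConfig(
          document_type=DocumentType.TABLES_AND_FORMS,
          extract_bounding_boxes=True,
          ocr_mode=OcrMode.AUTO,
          remove_header_footer=True,
          convert_to_markdown=True,
      )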


   .. py:attribute:: document_type
      :type:  abacusai.api_class.enums.DocumentType
      :value: None



   .. py:attribute:: highlight_relevant_text
      :type:  bool
      :value: None



   .. py:attribute:: extract_bounding_boxes
      :type:  bool
      :value: False



   .. py:attribute:: ocr_mode
      :type:  abacusai.api_class.enums.OcrMode


   .. py:attribute:: use_full_ocr
      :type:  bool
      :value: None



   .. py:attribute:: remove_header_footer
      :type:  bool
      :value: False



   .. py:attribute:: remove_watermarks
      :type:  bool
      :value: True



   .. py:attribute:: convert_to_markdown
      :type:  bool
      :value: False



   .. py:attribute:: mask_pii
      :type:  bool
      :value: False



   .. py:attribute:: extract_images
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


   .. py:method:: _detect_ocr_mode()


   .. py:method:: _get_filtered_dict(config)
      :classmethod:


      Filters out default values from the config



.. py:class:: DatasetDocumentProcessingConfig

   Bases: :py:obj:`DocumentProcessingConfig`


   Document processing configuration for dataset imports.

   :param extract_bounding_boxes: Whether to perform OCR and extract bounding boxes. If False, no OCR will be done but only the embedded text from digital documents will be extracted. Defaults to False.
   :type extract_bounding_boxes: bool
   :param ocr_mode: OCR mode. There are different OCR modes available for different kinds of documents and use cases. This option only takes effect when extract_bounding_boxes is True.
   :type ocr_mode: OcrMode
   :param use_full_ocr: Whether to perform full OCR. If True, OCR will be performed on the full page. If False, OCR will be performed on the non-text regions only. By default, it will be decided automatically based on the OCR mode and the document type. This option only takes effect when extract_bounding_boxes is True.
   :type use_full_ocr: bool
   :param remove_header_footer: Whether to remove headers and footers. Defaults to False. This option only takes effect when extract_bounding_boxes is True.
   :type remove_header_footer: bool
   :param remove_watermarks: Whether to remove watermarks. By default, it will be decided automatically based on the OCR mode and the document type. This option only takes effect when extract_bounding_boxes is True.
   :type remove_watermarks: bool
   :param convert_to_markdown: Whether to convert extracted text to markdown. Defaults to False. This option only takes effect when extract_bounding_boxes is True.
   :type convert_to_markdown: bool
   :param page_text_column: Name of the output column which contains the extracted text for each page. If not provided, no column will be created.
   :type page_text_column: str


   .. py:attribute:: page_text_column
      :type:  str
      :value: None



.. py:class:: IncrementalDatabaseConnectorConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Config information for incremental datasets from database connectors

   :param timestamp_column: If dataset is incremental, this is the column name of the required column in the dataset. This column must contain timestamps in descending order which are used to determine the increments of the incremental dataset.
   :type timestamp_column: str


   .. py:attribute:: timestamp_column
      :type:  str
      :value: None



.. py:class:: AttachmentParsingConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Config information for parsing attachments

   :param feature_group_name: The name of the feature group.
   :type feature_group_name: str
   :param column_name: The name of the column.
   :type column_name: str
   :param urls: A list of URLs.
   :type urls: str


   .. py:attribute:: feature_group_name
      :type:  str
      :value: None



   .. py:attribute:: column_name
      :type:  str
      :value: None



   .. py:attribute:: urls
      :type:  str
      :value: None



.. py:class:: ApplicationConnectorDatasetConfig

   Bases: :py:obj:`abacusai.api_class.dataset.DatasetConfig`


   An abstract class for dataset configs specific to application connectors.

   :param application_connector_type: The type of application connector
   :type application_connector_type: enums.ApplicationConnectorType
   :param application_connector_id: The ID of the application connector
   :type application_connector_id: str
   :param document_processing_config: The document processing configuration. Only valid if is_documentset is True for the dataset.
   :type document_processing_config: DatasetDocumentProcessingConfig


   .. py:attribute:: application_connector_type
      :type:  abacusai.api_class.enums.ApplicationConnectorType
      :value: None



   .. py:attribute:: application_connector_id
      :type:  str
      :value: None



   .. py:attribute:: document_processing_config
      :type:  abacusai.api_class.dataset.DatasetDocumentProcessingConfig
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: ConfluenceDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Confluence Application Connector

   :param location: The location of the pages to fetch
   :type location: str
   :param space_key: The space key of the space from which we fetch pages
   :type space_key: str
   :param pull_attachments: Whether to pull attachments for each page
   :type pull_attachments: bool
   :param extract_bounding_boxes: Whether to extract bounding boxes from the documents
   :type extract_bounding_boxes: bool


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: space_key
      :type:  str
      :value: None



   .. py:attribute:: pull_attachments
      :type:  bool
      :value: False



   .. py:attribute:: extract_bounding_boxes
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: BoxDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Box Application Connector

   :param location: The regex location of the files to fetch
   :type location: str
   :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
   :type csv_delimiter: str
   :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
   :type merge_file_schemas: bool


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: csv_delimiter
      :type:  str
      :value: None



   .. py:attribute:: merge_file_schemas
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: GoogleAnalyticsDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Google Analytics Application Connector

   :param location: The view id of the report in the connector to fetch
   :type location: str
   :param start_timestamp: Unix timestamp of the start of the period that will be queried
   :type start_timestamp: int
   :param end_timestamp: Unix timestamp of the end of the period that will be queried
   :type end_timestamp: int


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: start_timestamp
      :type:  int
      :value: None



   .. py:attribute:: end_timestamp
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: GoogleDriveDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Google Drive Application Connector

   :param location: The regex location of the files to fetch
   :type location: str
   :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
   :type csv_delimiter: str
   :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
   :type extract_bounding_boxes: bool
   :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
   :type merge_file_schemas: bool


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: csv_delimiter
      :type:  str
      :value: None



   .. py:attribute:: extract_bounding_boxes
      :type:  bool
      :value: False



   .. py:attribute:: merge_file_schemas
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: JiraDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Jira Application Connector

   :param jql: The JQL query for fetching issues
   :type jql: str


   .. py:attribute:: jql
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: OneDriveDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for OneDrive Application Connector

   :param location: The regex location of the files to fetch
   :type location: str
   :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
   :type csv_delimiter: str
   :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
   :type extract_bounding_boxes: bool
   :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
   :type merge_file_schemas: bool


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: csv_delimiter
      :type:  str
      :value: None



   .. py:attribute:: extract_bounding_boxes
      :type:  bool
      :value: False



   .. py:attribute:: merge_file_schemas
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: SharepointDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Sharepoint Application Connector

   :param location: The regex location of the files to fetch
   :type location: str
   :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
   :type csv_delimiter: str
   :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
   :type extract_bounding_boxes: bool
   :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
   :type merge_file_schemas: bool


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: csv_delimiter
      :type:  str
      :value: None



   .. py:attribute:: extract_bounding_boxes
      :type:  bool
      :value: False



   .. py:attribute:: merge_file_schemas
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: ZendeskDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Zendesk Application Connector

   :param location: The regex location of the files to fetch
   :type location: str


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: AbacusUsageMetricsDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Abacus Usage Metrics Application Connector

   :param include_entire_conversation_history: Whether to show the entire history for this deployment conversation
   :type include_entire_conversation_history: bool
   :param include_all_feedback: Whether to include all feedback for this deployment conversation
   :type include_all_feedback: bool
   :param resolve_matching_documents: Whether to get matching document references for the response instead of the prompt.
                                      This requires recalculating them if highlights are unavailable in summary_info.
   :type resolve_matching_documents: bool


   .. py:attribute:: include_entire_conversation_history
      :type:  bool
      :value: False



   .. py:attribute:: include_all_feedback
      :type:  bool
      :value: False



   .. py:attribute:: resolve_matching_documents
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: TeamsScraperDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Teams Scraper Application Connector

   :param pull_chat_messages: Whether to pull teams chat messages
   :type pull_chat_messages: bool
   :param pull_channel_posts: Whether to pull posts for each channel
   :type pull_channel_posts: bool
   :param pull_transcripts: Whether to pull transcripts for calendar meetings
   :type pull_transcripts: bool


   .. py:attribute:: pull_chat_messages
      :type:  bool
      :value: False



   .. py:attribute:: pull_channel_posts
      :type:  bool
      :value: False



   .. py:attribute:: pull_transcripts
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: FreshserviceDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for Freshservice Application Connector


   .. py:method:: __post_init__()


.. py:class:: SftpDatasetConfig

   Bases: :py:obj:`ApplicationConnectorDatasetConfig`


   Dataset config for SFTP Application Connector

   :param location: The regex location of the files to fetch
   :type location: str
   :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
   :type csv_delimiter: str
   :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
   :type extract_bounding_boxes: bool
   :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
   :type merge_file_schemas: bool


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: csv_delimiter
      :type:  str
      :value: None



   .. py:attribute:: extract_bounding_boxes
      :type:  bool
      :value: False



   .. py:attribute:: merge_file_schemas
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: _ApplicationConnectorDatasetConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'application_connector_type'



   .. py:attribute:: config_class_map


.. py:class:: PredictionArguments

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for prediction arguments specific to problem type.


   .. py:attribute:: _support_kwargs
      :type:  bool
      :value: True



   .. py:attribute:: kwargs
      :type:  dict


   .. py:attribute:: problem_type
      :type:  abacusai.api_class.enums.ProblemType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: OptimizationPredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the OPTIMIZATION problem type

   :param forced_assignments: Set of assignments to force and resolve before returning query results.
   :type forced_assignments: dict
   :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
   :type solve_time_limit_seconds: float
   :param include_all_assignments: If True, will return all assignments, including assignments with value 0. Default is False.
   :type include_all_assignments: bool


   .. py:attribute:: forced_assignments
      :type:  dict
      :value: None



   .. py:attribute:: solve_time_limit_seconds
      :type:  float
      :value: None



   .. py:attribute:: include_all_assignments
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: TimeseriesAnomalyPredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the TS_ANOMALY problem type

   :param start_timestamp: Timestamp from which anomalies have to be detected in the training data
   :type start_timestamp: str
   :param end_timestamp: Timestamp to which anomalies have to be detected in the training data
   :type end_timestamp: str
   :param get_all_item_data: If True, anomaly detection has to be performed on all the data related to input ids
   :type get_all_item_data: bool


   .. py:attribute:: start_timestamp
      :type:  str
      :value: None



   .. py:attribute:: end_timestamp
      :type:  str
      :value: None



   .. py:attribute:: get_all_item_data
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ChatLLMPredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the CHAT_LLM problem type

   :param llm_name: Name of the specific LLM backend to use to power the chat experience.
   :type llm_name: str
   :param num_completion_tokens: Default for maximum number of tokens for chat answers.
   :type num_completion_tokens: int
   :param system_message: The generative LLM system message.
   :type system_message: str
   :param temperature: The generative LLM temperature.
   :type temperature: float
   :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
   :type search_score_cutoff: float
   :param ignore_documents: If True, will ignore any documents and search results, and only use the messages to generate a response.
   :type ignore_documents: bool
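
   A sketch of typical arguments; the numeric values are illustrative and ``llm_name`` is omitted because valid backend names depend on the deployment:

   .. code-block:: python

      from abacusai import ChatLLMPredictionArguments

      arguments = ChatLLMPredictionArguments(
          num_completion_tokens=1024,
          temperature=0.2,
          search_score_cutoff=0.5,
          ignore_documents=False,
      )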


   .. py:attribute:: llm_name
      :type:  str
      :value: None



   .. py:attribute:: num_completion_tokens
      :type:  int
      :value: None



   .. py:attribute:: system_message
      :type:  str
      :value: None



   .. py:attribute:: temperature
      :type:  float
      :value: None



   .. py:attribute:: search_score_cutoff
      :type:  float
      :value: None



   .. py:attribute:: ignore_documents
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: RegressionPredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the PREDICTIVE_MODELING problem type

   :param explain_predictions: If true, will explain predictions.
   :type explain_predictions: bool
   :param explainer_type: Type of explainer to use for explanations.
   :type explainer_type: str


   .. py:attribute:: explain_predictions
      :type:  bool
      :value: None



   .. py:attribute:: explainer_type
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ForecastingPredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the FORECASTING problem type

   :param num_predictions: The number of timestamps to predict in the future.
   :type num_predictions: int
   :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for midnight of 2015-08-01).
   :type prediction_start: str
   :param explain_predictions: If True, explain predictions for forecasting.
   :type explain_predictions: bool
   :param explainer_type: Type of explainer to use for explanations.
   :type explainer_type: str
   :param get_item_data: If True, will return the data corresponding to items as well.
   :type get_item_data: bool
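
   A minimal construction sketch (values are illustrative):

   .. code-block:: python

      from abacusai import ForecastingPredictionArguments

      # Forecast 14 future timestamps starting at midnight of 2015-08-01.
      args = ForecastingPredictionArguments(
          num_predictions=14,
          prediction_start='2015-08-01T00:00:00',
          explain_predictions=False,
          get_item_data=True,
      )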


   .. py:attribute:: num_predictions
      :type:  int
      :value: None



   .. py:attribute:: prediction_start
      :type:  str
      :value: None



   .. py:attribute:: explain_predictions
      :type:  bool
      :value: None



   .. py:attribute:: explainer_type
      :type:  str
      :value: None



   .. py:attribute:: get_item_data
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: CumulativeForecastingPredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the CUMULATIVE_FORECASTING problem type

   :param num_predictions: The number of timestamps to predict in the future.
   :type num_predictions: int
   :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for midnight of 2015-08-01).
   :type prediction_start: str
   :param explain_predictions: If True, explain predictions for forecasting.
   :type explain_predictions: bool
   :param explainer_type: Type of explainer to use for explanations.
   :type explainer_type: str
   :param get_item_data: If True, will return the data corresponding to items as well.
   :type get_item_data: bool


   .. py:attribute:: num_predictions
      :type:  int
      :value: None



   .. py:attribute:: prediction_start
      :type:  str
      :value: None



   .. py:attribute:: explain_predictions
      :type:  bool
      :value: None



   .. py:attribute:: explainer_type
      :type:  str
      :value: None



   .. py:attribute:: get_item_data
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: NaturalLanguageSearchPredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the NATURAL_LANGUAGE_SEARCH problem type

   :param llm_name: Name of the specific LLM backend to use to power the chat experience.
   :type llm_name: str
   :param num_completion_tokens: Default maximum number of tokens for chat answers.
   :type num_completion_tokens: int
   :param system_message: The generative LLM system message.
   :type system_message: str
   :param temperature: The generative LLM temperature.
   :type temperature: float
   :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
   :type search_score_cutoff: float
   :param ignore_documents: If True, will ignore any documents and search results, and only use the messages to generate a response.
   :type ignore_documents: bool


   .. py:attribute:: llm_name
      :type:  str
      :value: None



   .. py:attribute:: num_completion_tokens
      :type:  int
      :value: None



   .. py:attribute:: system_message
      :type:  str
      :value: None



   .. py:attribute:: temperature
      :type:  float
      :value: None



   .. py:attribute:: search_score_cutoff
      :type:  float
      :value: None



   .. py:attribute:: ignore_documents
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: FeatureStorePredictionArguments

   Bases: :py:obj:`PredictionArguments`


   Prediction arguments for the FEATURE_STORE problem type

   :param limit_results: If provided, will limit the number of results to the value specified.
   :type limit_results: int


   .. py:attribute:: limit_results
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: _PredictionArgumentsFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'problem_type'



   .. py:attribute:: config_class_map


.. py:class:: VectorStoreTextEncoder

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: E5
      :value: 'E5'



   .. py:attribute:: OPENAI
      :value: 'OPENAI'



   .. py:attribute:: OPENAI_COMPACT
      :value: 'OPENAI_COMPACT'



   .. py:attribute:: OPENAI_LARGE
      :value: 'OPENAI_LARGE'



   .. py:attribute:: SENTENCE_BERT
      :value: 'SENTENCE_BERT'



   .. py:attribute:: E5_SMALL
      :value: 'E5_SMALL'



   .. py:attribute:: CODE_BERT
      :value: 'CODE_BERT'



.. py:class:: VectorStoreConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Config for indexing options of a document retriever. Default values of optional arguments are heuristically selected by the Abacus.AI platform based on the underlying data.

   :param chunk_size: The size of text chunks in the vector store.
   :type chunk_size: int
   :param chunk_overlap_fraction: The fraction of overlap between chunks.
   :type chunk_overlap_fraction: float
   :param text_encoder: Encoder used to index texts from the documents.
   :type text_encoder: VectorStoreTextEncoder
   :param chunk_size_factors: Chunking data with multiple sizes. The specified list of factors is used to calculate more sizes, in addition to `chunk_size`.
   :type chunk_size_factors: list
   :param score_multiplier_column: If provided, will use the values in this metadata column to modify the relevance score of returned chunks for all queries.
   :type score_multiplier_column: str
   :param prune_vectors: Transform vectors using SVD so that the average component of vectors in the corpus is removed.
   :type prune_vectors: bool
   :param index_metadata_columns: If True, metadata columns of the FG will also be used for indexing and querying.
   :type index_metadata_columns: bool
   :param use_document_summary: If True, uses the summary of the document in addition to chunks of the document for indexing and querying.
   :type use_document_summary: bool
   :param summary_instructions: Instructions for the LLM to generate the document summary.
   :type summary_instructions: str
   :param standalone_deployment: If True, the document retriever will be deployed as a standalone deployment.
   :type standalone_deployment: bool
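
   A minimal construction sketch (chunking values and the metadata column name are illustrative; ``text_encoder`` uses one of the ``VectorStoreTextEncoder`` values documented above):

   .. code-block:: python

      from abacusai import VectorStoreConfig, VectorStoreTextEncoder

      config = VectorStoreConfig(
          chunk_size=512,
          chunk_overlap_fraction=0.1,
          text_encoder=VectorStoreTextEncoder.OPENAI_LARGE,
          score_multiplier_column='priority',  # illustrative metadata column name
          use_document_summary=True,
          summary_instructions='Summarize each document in two sentences.',
      )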


   .. py:attribute:: chunk_size
      :type:  int
      :value: None



   .. py:attribute:: chunk_overlap_fraction
      :type:  float
      :value: None



   .. py:attribute:: text_encoder
      :type:  abacusai.api_class.enums.VectorStoreTextEncoder
      :value: None



   .. py:attribute:: chunk_size_factors
      :type:  list
      :value: None



   .. py:attribute:: score_multiplier_column
      :type:  str
      :value: None



   .. py:attribute:: prune_vectors
      :type:  bool
      :value: None



   .. py:attribute:: index_metadata_columns
      :type:  bool
      :value: None



   .. py:attribute:: use_document_summary
      :type:  bool
      :value: None



   .. py:attribute:: summary_instructions
      :type:  str
      :value: None



   .. py:attribute:: standalone_deployment
      :type:  bool
      :value: False



.. py:data:: DocumentRetrieverConfig

.. py:function:: deprecated_enums(*enum_values)

.. py:class:: ApiEnum

   Bases: :py:obj:`enum.Enum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: __deprecated_values__
      :value: []



   .. py:method:: is_deprecated()


   .. py:method:: __eq__(other)


   .. py:method:: __hash__()


.. py:class:: ProblemType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AI_AGENT
      :value: 'ai_agent'



   .. py:attribute:: EVENT_ANOMALY
      :value: 'event_anomaly'



   .. py:attribute:: CLUSTERING
      :value: 'clustering'



   .. py:attribute:: CLUSTERING_TIMESERIES
      :value: 'clustering_timeseries'



   .. py:attribute:: CUMULATIVE_FORECASTING
      :value: 'cumulative_forecasting'



   .. py:attribute:: NAMED_ENTITY_EXTRACTION
      :value: 'nlp_ner'



   .. py:attribute:: NATURAL_LANGUAGE_SEARCH
      :value: 'nlp_search'



   .. py:attribute:: CHAT_LLM
      :value: 'chat_llm'



   .. py:attribute:: SENTENCE_BOUNDARY_DETECTION
      :value: 'nlp_sentence_boundary_detection'



   .. py:attribute:: SENTIMENT_DETECTION
      :value: 'nlp_sentiment'



   .. py:attribute:: DOCUMENT_CLASSIFICATION
      :value: 'nlp_classification'



   .. py:attribute:: DOCUMENT_SUMMARIZATION
      :value: 'nlp_summarization'



   .. py:attribute:: DOCUMENT_VISUALIZATION
      :value: 'nlp_document_visualization'



   .. py:attribute:: PERSONALIZATION
      :value: 'personalization'



   .. py:attribute:: PREDICTIVE_MODELING
      :value: 'regression'



   .. py:attribute:: FINETUNED_LLM
      :value: 'finetuned_llm'



   .. py:attribute:: FORECASTING
      :value: 'forecasting'



   .. py:attribute:: CUSTOM_TRAINED_MODEL
      :value: 'plug_and_play'



   .. py:attribute:: CUSTOM_ALGORITHM
      :value: 'trainable_plug_and_play'



   .. py:attribute:: FEATURE_STORE
      :value: 'feature_store'



   .. py:attribute:: IMAGE_CLASSIFICATION
      :value: 'vision_classification'



   .. py:attribute:: OBJECT_DETECTION
      :value: 'vision_object_detection'



   .. py:attribute:: IMAGE_VALUE_PREDICTION
      :value: 'vision_regression'



   .. py:attribute:: MODEL_MONITORING
      :value: 'model_monitoring'



   .. py:attribute:: LANGUAGE_DETECTION
      :value: 'language_detection'



   .. py:attribute:: OPTIMIZATION
      :value: 'optimization'



   .. py:attribute:: PRETRAINED_MODELS
      :value: 'pretrained'



   .. py:attribute:: THEME_ANALYSIS
      :value: 'theme_analysis'



   .. py:attribute:: TS_ANOMALY
      :value: 'ts_anomaly'



.. py:class:: RegressionObjective

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AUC
      :value: 'auc'



   .. py:attribute:: ACCURACY
      :value: 'acc'



   .. py:attribute:: LOG_LOSS
      :value: 'log_loss'



   .. py:attribute:: PRECISION
      :value: 'precision'



   .. py:attribute:: RECALL
      :value: 'recall'



   .. py:attribute:: F1_SCORE
      :value: 'fscore'



   .. py:attribute:: MAE
      :value: 'mae'



   .. py:attribute:: MAPE
      :value: 'mape'



   .. py:attribute:: WAPE
      :value: 'wape'



   .. py:attribute:: RMSE
      :value: 'rmse'



   .. py:attribute:: R_SQUARED_COEFFICIENT_OF_DETERMINATION
      :value: 'r^2'



.. py:class:: RegressionTreeHPOMode

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: RAPID
      :value: 'rapid'



   .. py:attribute:: THOROUGH
      :value: 'thorough'



.. py:class:: PartialDependenceAnalysis

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: RAPID
      :value: 'rapid'



   .. py:attribute:: THOROUGH
      :value: 'thorough'



.. py:class:: RegressionAugmentationStrategy

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: SMOTE
      :value: 'smote'



   .. py:attribute:: RESAMPLE
      :value: 'resample'



.. py:class:: RegressionTargetTransform

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: LOG
      :value: 'log'



   .. py:attribute:: QUANTILE
      :value: 'quantile'



   .. py:attribute:: YEO_JOHNSON
      :value: 'yeo-johnson'



   .. py:attribute:: BOX_COX
      :value: 'box-cox'



.. py:class:: RegressionTypeOfSplit

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: RANDOM
      :value: 'Random Sampling'



   .. py:attribute:: TIMESTAMP_BASED
      :value: 'Timestamp Based'



   .. py:attribute:: ROW_INDICATOR_BASED
      :value: 'Row Indicator Based'



   .. py:attribute:: STRATIFIED_RANDOM_SAMPLING
      :value: 'Stratified Random Sampling'



.. py:class:: RegressionTimeSplitMethod

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: TEST_SPLIT_PERCENTAGE_BASED
      :value: 'Test Split Percentage Based'



   .. py:attribute:: TEST_START_TIMESTAMP_BASED
      :value: 'Test Start Timestamp Based'



.. py:class:: RegressionLossFunction

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: HUBER
      :value: 'Huber'



   .. py:attribute:: MSE
      :value: 'Mean Squared Error'



   .. py:attribute:: MAE
      :value: 'Mean Absolute Error'



   .. py:attribute:: MAPE
      :value: 'Mean Absolute Percentage Error'



   .. py:attribute:: MSLE
      :value: 'Mean Squared Logarithmic Error'



   .. py:attribute:: TWEEDIE
      :value: 'Tweedie'



   .. py:attribute:: CROSS_ENTROPY
      :value: 'Cross Entropy'



   .. py:attribute:: FOCAL_CROSS_ENTROPY
      :value: 'Focal Cross Entropy'



   .. py:attribute:: AUTOMATIC
      :value: 'Automatic'



   .. py:attribute:: CUSTOM
      :value: 'Custom'



.. py:class:: ExplainerType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: KERNEL_EXPLAINER
      :value: 'KERNEL_EXPLAINER'



   .. py:attribute:: LIME_EXPLAINER
      :value: 'LIME_EXPLAINER'



   .. py:attribute:: TREE_EXPLAINER
      :value: 'TREE_EXPLAINER'



   .. py:attribute:: EBM_EXPLAINER
      :value: 'EBM_EXPLAINER'



.. py:class:: SamplingMethodType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: N_SAMPLING
      :value: 'N_SAMPLING'



   .. py:attribute:: PERCENT_SAMPLING
      :value: 'PERCENT_SAMPLING'



.. py:class:: MergeMode

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: LAST_N
      :value: 'LAST_N'



   .. py:attribute:: TIME_WINDOW
      :value: 'TIME_WINDOW'



.. py:class:: OperatorType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: UNPIVOT
      :value: 'UNPIVOT'



   .. py:attribute:: MARKDOWN
      :value: 'MARKDOWN'



   .. py:attribute:: CRAWLER
      :value: 'CRAWLER'



   .. py:attribute:: EXTRACT_DOCUMENT_DATA
      :value: 'EXTRACT_DOCUMENT_DATA'



   .. py:attribute:: DATA_GENERATION
      :value: 'DATA_GENERATION'



   .. py:attribute:: UNION
      :value: 'UNION'



.. py:class:: MarkdownOperatorInputType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: HTML
      :value: 'HTML'



.. py:class:: FillLogic

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AVERAGE
      :value: 'average'



   .. py:attribute:: MAX
      :value: 'max'



   .. py:attribute:: MEDIAN
      :value: 'median'



   .. py:attribute:: MIN
      :value: 'min'



   .. py:attribute:: CUSTOM
      :value: 'custom'



   .. py:attribute:: BACKFILL
      :value: 'bfill'



   .. py:attribute:: FORWARDFILL
      :value: 'ffill'



   .. py:attribute:: LINEAR
      :value: 'linear'



   .. py:attribute:: NEAREST
      :value: 'nearest'



.. py:class:: BatchSize

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: BATCH_8
      :value: 8



   .. py:attribute:: BATCH_16
      :value: 16



   .. py:attribute:: BATCH_32
      :value: 32



   .. py:attribute:: BATCH_64
      :value: 64



   .. py:attribute:: BATCH_128
      :value: 128



   .. py:attribute:: BATCH_256
      :value: 256



   .. py:attribute:: BATCH_384
      :value: 384



   .. py:attribute:: BATCH_512
      :value: 512



   .. py:attribute:: BATCH_740
      :value: 740



   .. py:attribute:: BATCH_1024
      :value: 1024



.. py:class:: HolidayCalendars

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AU
      :value: 'AU'



   .. py:attribute:: UK
      :value: 'UK'



   .. py:attribute:: US
      :value: 'US'



.. py:class:: FileFormat

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AVRO
      :value: 'AVRO'



   .. py:attribute:: PARQUET
      :value: 'PARQUET'



   .. py:attribute:: TFRECORD
      :value: 'TFRECORD'



   .. py:attribute:: TSV
      :value: 'TSV'



   .. py:attribute:: CSV
      :value: 'CSV'



   .. py:attribute:: ORC
      :value: 'ORC'



   .. py:attribute:: JSON
      :value: 'JSON'



   .. py:attribute:: ODS
      :value: 'ODS'



   .. py:attribute:: XLS
      :value: 'XLS'



   .. py:attribute:: GZ
      :value: 'GZ'



   .. py:attribute:: ZIP
      :value: 'ZIP'



   .. py:attribute:: TAR
      :value: 'TAR'



   .. py:attribute:: DOCX
      :value: 'DOCX'



   .. py:attribute:: PDF
      :value: 'PDF'



   .. py:attribute:: MD
      :value: 'md'



   .. py:attribute:: RAR
      :value: 'RAR'



   .. py:attribute:: GIF
      :value: 'GIF'



   .. py:attribute:: JPEG
      :value: 'JPG'



   .. py:attribute:: PNG
      :value: 'PNG'



   .. py:attribute:: TIF
      :value: 'TIFF'



   .. py:attribute:: NUMBERS
      :value: 'NUMBERS'



   .. py:attribute:: PPTX
      :value: 'PPTX'



   .. py:attribute:: PPT
      :value: 'PPT'



   .. py:attribute:: HTML
      :value: 'HTML'



   .. py:attribute:: TXT
      :value: 'txt'



   .. py:attribute:: EML
      :value: 'eml'



   .. py:attribute:: MP3
      :value: 'MP3'



   .. py:attribute:: MP4
      :value: 'MP4'



   .. py:attribute:: FLV
      :value: 'flv'



   .. py:attribute:: MOV
      :value: 'mov'



   .. py:attribute:: MPG
      :value: 'mpg'



   .. py:attribute:: MPEG
      :value: 'mpeg'



   .. py:attribute:: WEBP
      :value: 'webp'



   .. py:attribute:: WEBM
      :value: 'webm'



   .. py:attribute:: WMV
      :value: 'wmv'



   .. py:attribute:: MSG
      :value: 'msg'



.. py:class:: ExperimentationMode

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: RAPID
      :value: 'rapid'



   .. py:attribute:: THOROUGH
      :value: 'thorough'



.. py:class:: PersonalizationTrainingMode

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: EXPERIMENTAL
      :value: 'EXP'



   .. py:attribute:: PRODUCTION
      :value: 'PROD'



.. py:class:: PersonalizationObjective

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: NDCG
      :value: 'ndcg'



   .. py:attribute:: NDCG_5
      :value: 'ndcg@5'



   .. py:attribute:: NDCG_10
      :value: 'ndcg@10'



   .. py:attribute:: MAP
      :value: 'map'



   .. py:attribute:: MAP_5
      :value: 'map@5'



   .. py:attribute:: MAP_10
      :value: 'map@10'



   .. py:attribute:: MRR
      :value: 'mrr'



   .. py:attribute:: PERSONALIZATION
      :value: 'personalization@10'



   .. py:attribute:: COVERAGE
      :value: 'coverage'



.. py:class:: ForecastingObjective

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: ACCURACY
      :value: 'w_c_accuracy'



   .. py:attribute:: WAPE
      :value: 'wape'



   .. py:attribute:: MAPE
      :value: 'mape'



   .. py:attribute:: CMAPE
      :value: 'cmape'



   .. py:attribute:: RMSE
      :value: 'rmse'



   .. py:attribute:: CV
      :value: 'coefficient_of_variation'



   .. py:attribute:: BIAS
      :value: 'bias'



   .. py:attribute:: SRMSE
      :value: 'srmse'



.. py:class:: ForecastingFrequency

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: HOURLY
      :value: '1H'



   .. py:attribute:: DAILY
      :value: '1D'



   .. py:attribute:: WEEKLY_SUNDAY_START
      :value: '1W'



   .. py:attribute:: WEEKLY_MONDAY_START
      :value: 'W-MON'



   .. py:attribute:: WEEKLY_SATURDAY_START
      :value: 'W-SAT'



   .. py:attribute:: MONTH_START
      :value: 'MS'



   .. py:attribute:: MONTH_END
      :value: '1M'



   .. py:attribute:: QUARTER_START
      :value: 'QS'



   .. py:attribute:: QUARTER_END
      :value: '1Q'



   .. py:attribute:: YEARLY
      :value: '1Y'



.. py:class:: ForecastingDataSplitType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AUTO
      :value: 'Automatic Time Based'



   .. py:attribute:: TIMESTAMP
      :value: 'Timestamp Based'



   .. py:attribute:: ITEM
      :value: 'Item Based'



   .. py:attribute:: PREDICTION_LENGTH
      :value: 'Force Prediction Length'



   .. py:attribute:: L_SHAPED_AUTO
      :value: 'L-shaped Split - Automatic Time Based'



   .. py:attribute:: L_SHAPED_TIMESTAMP
      :value: 'L-shaped Split - Timestamp Based'



.. py:class:: ForecastingLossFunction

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: CUSTOM
      :value: 'Custom'



   .. py:attribute:: MEAN_ABSOLUTE_ERROR
      :value: 'mae'



   .. py:attribute:: NORMALIZED_MEAN_ABSOLUTE_ERROR
      :value: 'nmae'



   .. py:attribute:: PEAKS_MEAN_ABSOLUTE_ERROR
      :value: 'peaks_mae'



   .. py:attribute:: MEAN_ABSOLUTE_PERCENTAGE_ERROR
      :value: 'stable_mape'



   .. py:attribute:: POINTWISE_ACCURACY
      :value: 'accuracy'



   .. py:attribute:: ROOT_MEAN_SQUARE_ERROR
      :value: 'rmse'



   .. py:attribute:: NORMALIZED_ROOT_MEAN_SQUARE_ERROR
      :value: 'nrmse'



   .. py:attribute:: ASYMMETRIC_MEAN_ABSOLUTE_PERCENTAGE_ERROR
      :value: 'asymmetric_mape'



   .. py:attribute:: STABLE_STANDARDIZED_MEAN_ABSOLUTE_PERCENTAGE_ERROR
      :value: 'stable_standardized_mape_with_cmape'



   .. py:attribute:: GAUSSIAN
      :value: 'mle_gaussian_local'



   .. py:attribute:: GAUSSIAN_FULL_COVARIANCE
      :value: 'mle_gaussfullcov'



   .. py:attribute:: GUASSIAN_EXPONENTIAL
      :value: 'mle_gaussexp'



   .. py:attribute:: MIX_GAUSSIANS
      :value: 'mle_gaussmix'



   .. py:attribute:: WEIBULL
      :value: 'mle_weibull'



   .. py:attribute:: NEGATIVE_BINOMIAL
      :value: 'mle_negbinom'



   .. py:attribute:: LOG_ROOT_MEAN_SQUARE_ERROR
      :value: 'log_rmse'



.. py:class:: ForecastingLocalScaling

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: ZSCORE
      :value: 'zscore'



   .. py:attribute:: SLIDING_ZSCORE
      :value: 'sliding_zscore'



   .. py:attribute:: LAST_POINT
      :value: 'lastpoint'



   .. py:attribute:: MIN_MAX
      :value: 'minmax'



   .. py:attribute:: MIN_STD
      :value: 'minstd'



   .. py:attribute:: ROBUST
      :value: 'robust'



   .. py:attribute:: ITEM
      :value: 'item'



.. py:class:: ForecastingFillMethod

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: BACK
      :value: 'BACK'



   .. py:attribute:: MIDDLE
      :value: 'MIDDLE'



   .. py:attribute:: FUTURE
      :value: 'FUTURE'



.. py:class:: ForecastingQuanitlesExtensionMethod

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: DIRECT
      :value: 'direct'



   .. py:attribute:: QUADRATIC
      :value: 'quadratic'



   .. py:attribute:: ANCESTRAL_SIMULATION
      :value: 'simulation'



.. py:class:: TimeseriesAnomalyDataSplitType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AUTO
      :value: 'Automatic Time Based'



   .. py:attribute:: TIMESTAMP
      :value: 'Fixed Timestamp Based'



.. py:class:: TimeseriesAnomalyTypeOfAnomaly

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: HIGH_PEAK
      :value: 'high_peak'



   .. py:attribute:: LOW_PEAK
      :value: 'low_peak'



.. py:class:: TimeseriesAnomalyUseHeuristic

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: ENABLE
      :value: 'enable'



   .. py:attribute:: DISABLE
      :value: 'disable'



   .. py:attribute:: AUTOMATIC
      :value: 'automatic'



.. py:class:: NERObjective

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: LOG_LOSS
      :value: 'log_loss'



   .. py:attribute:: AUC
      :value: 'auc'



   .. py:attribute:: PRECISION
      :value: 'precision'



   .. py:attribute:: RECALL
      :value: 'recall'



   .. py:attribute:: ANNOTATIONS_PRECISION
      :value: 'annotations_precision'



   .. py:attribute:: ANNOTATIONS_RECALL
      :value: 'annotations_recall'



.. py:class:: NERModelType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: PRETRAINED_BERT
      :value: 'pretrained_bert'



   .. py:attribute:: PRETRAINED_ROBERTA_27
      :value: 'pretrained_roberta_27'



   .. py:attribute:: PRETRAINED_ROBERTA_43
      :value: 'pretrained_roberta_43'



   .. py:attribute:: PRETRAINED_MULTILINGUAL
      :value: 'pretrained_multilingual'



   .. py:attribute:: LEARNED
      :value: 'learned'



.. py:class:: NLPDocumentFormat

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AUTO
      :value: 'auto'



   .. py:attribute:: TEXT
      :value: 'text'



   .. py:attribute:: DOC
      :value: 'doc'



   .. py:attribute:: TOKENS
      :value: 'tokens'



.. py:class:: SentimentType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: VALENCE
      :value: 'valence'



   .. py:attribute:: EMOTION
      :value: 'emotion'



.. py:class:: ClusteringImputationMethod

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AUTOMATIC
      :value: 'Automatic'



   .. py:attribute:: ZEROS
      :value: 'Zeros'



   .. py:attribute:: INTERPOLATE
      :value: 'Interpolate'



.. py:class:: ConnectorType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: FILE
      :value: 'FILE'



   .. py:attribute:: DATABASE
      :value: 'DATABASE'



   .. py:attribute:: STREAMING
      :value: 'STREAMING'



   .. py:attribute:: APPLICATION
      :value: 'APPLICATION'



.. py:class:: ApplicationConnectorType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: GOOGLEANALYTICS
      :value: 'GOOGLEANALYTICS'



   .. py:attribute:: GOOGLEDRIVE
      :value: 'GOOGLEDRIVE'



   .. py:attribute:: GIT
      :value: 'GIT'



   .. py:attribute:: CONFLUENCE
      :value: 'CONFLUENCE'



   .. py:attribute:: JIRA
      :value: 'JIRA'



   .. py:attribute:: ONEDRIVE
      :value: 'ONEDRIVE'



   .. py:attribute:: ZENDESK
      :value: 'ZENDESK'



   .. py:attribute:: SLACK
      :value: 'SLACK'



   .. py:attribute:: SHAREPOINT
      :value: 'SHAREPOINT'



   .. py:attribute:: TEAMS
      :value: 'TEAMS'



   .. py:attribute:: ABACUSUSAGEMETRICS
      :value: 'ABACUSUSAGEMETRICS'



   .. py:attribute:: MICROSOFTAUTH
      :value: 'MICROSOFTAUTH'



   .. py:attribute:: FRESHSERVICE
      :value: 'FRESHSERVICE'



   .. py:attribute:: ZENDESKSUNSHINEMESSAGING
      :value: 'ZENDESKSUNSHINEMESSAGING'



   .. py:attribute:: GOOGLEDRIVEUSER
      :value: 'GOOGLEDRIVEUSER'



   .. py:attribute:: GOOGLEWORKSPACEUSER
      :value: 'GOOGLEWORKSPACEUSER'



   .. py:attribute:: GMAILUSER
      :value: 'GMAILUSER'



   .. py:attribute:: GOOGLECALENDAR
      :value: 'GOOGLECALENDAR'



   .. py:attribute:: GOOGLESHEETS
      :value: 'GOOGLESHEETS'



   .. py:attribute:: GOOGLEDOCS
      :value: 'GOOGLEDOCS'



   .. py:attribute:: TEAMSSCRAPER
      :value: 'TEAMSSCRAPER'



   .. py:attribute:: GITHUBUSER
      :value: 'GITHUBUSER'



   .. py:attribute:: OKTASAML
      :value: 'OKTASAML'



   .. py:attribute:: BOX
      :value: 'BOX'



   .. py:attribute:: SFTPAPPLICATION
      :value: 'SFTPAPPLICATION'



   .. py:attribute:: OAUTH
      :value: 'OAUTH'



.. py:class:: StreamingConnectorType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: KAFKA
      :value: 'KAFKA'



.. py:class:: PythonFunctionArgumentType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: FEATURE_GROUP
      :value: 'FEATURE_GROUP'



   .. py:attribute:: INTEGER
      :value: 'INTEGER'



   .. py:attribute:: STRING
      :value: 'STRING'



   .. py:attribute:: BOOLEAN
      :value: 'BOOLEAN'



   .. py:attribute:: FLOAT
      :value: 'FLOAT'



   .. py:attribute:: JSON
      :value: 'JSON'



   .. py:attribute:: LIST
      :value: 'LIST'



   .. py:attribute:: DATASET_ID
      :value: 'DATASET_ID'



   .. py:attribute:: MODEL_ID
      :value: 'MODEL_ID'



   .. py:attribute:: FEATURE_GROUP_ID
      :value: 'FEATURE_GROUP_ID'



   .. py:attribute:: MONITOR_ID
      :value: 'MONITOR_ID'



   .. py:attribute:: BATCH_PREDICTION_ID
      :value: 'BATCH_PREDICTION_ID'



   .. py:attribute:: DEPLOYMENT_ID
      :value: 'DEPLOYMENT_ID'



   .. py:attribute:: ATTACHMENT
      :value: 'ATTACHMENT'



.. py:class:: PythonFunctionOutputArgumentType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: NTEGER
      :value: 'INTEGER'



   .. py:attribute:: STRING
      :value: 'STRING'



   .. py:attribute:: BOOLEAN
      :value: 'BOOLEAN'



   .. py:attribute:: FLOAT
      :value: 'FLOAT'



   .. py:attribute:: JSON
      :value: 'JSON'



   .. py:attribute:: LIST
      :value: 'LIST'



   .. py:attribute:: DATASET_ID
      :value: 'DATASET_ID'



   .. py:attribute:: MODEL_ID
      :value: 'MODEL_ID'



   .. py:attribute:: FEATURE_GROUP_ID
      :value: 'FEATURE_GROUP_ID'



   .. py:attribute:: MONITOR_ID
      :value: 'MONITOR_ID'



   .. py:attribute:: BATCH_PREDICTION_ID
      :value: 'BATCH_PREDICTION_ID'



   .. py:attribute:: DEPLOYMENT_ID
      :value: 'DEPLOYMENT_ID'



   .. py:attribute:: ANY
      :value: 'ANY'



   .. py:attribute:: ATTACHMENT
      :value: 'ATTACHMENT'



.. py:class:: LLMName

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: OPENAI_GPT4
      :value: 'OPENAI_GPT4'



   .. py:attribute:: OPENAI_GPT4_32K
      :value: 'OPENAI_GPT4_32K'



   .. py:attribute:: OPENAI_GPT4_128K
      :value: 'OPENAI_GPT4_128K'



   .. py:attribute:: OPENAI_GPT4_128K_LATEST
      :value: 'OPENAI_GPT4_128K_LATEST'



   .. py:attribute:: OPENAI_GPT4O
      :value: 'OPENAI_GPT4O'



   .. py:attribute:: OPENAI_GPT4O_MINI
      :value: 'OPENAI_GPT4O_MINI'



   .. py:attribute:: OPENAI_O1_MINI
      :value: 'OPENAI_O1_MINI'



   .. py:attribute:: OPENAI_GPT3_5
      :value: 'OPENAI_GPT3_5'



   .. py:attribute:: OPENAI_GPT3_5_TEXT
      :value: 'OPENAI_GPT3_5_TEXT'



   .. py:attribute:: LLAMA3_1_405B
      :value: 'LLAMA3_1_405B'



   .. py:attribute:: LLAMA3_1_70B
      :value: 'LLAMA3_1_70B'



   .. py:attribute:: LLAMA3_1_8B
      :value: 'LLAMA3_1_8B'



   .. py:attribute:: LLAMA3_3_70B
      :value: 'LLAMA3_3_70B'



   .. py:attribute:: LLAMA3_LARGE_CHAT
      :value: 'LLAMA3_LARGE_CHAT'



   .. py:attribute:: CLAUDE_V3_OPUS
      :value: 'CLAUDE_V3_OPUS'



   .. py:attribute:: CLAUDE_V3_SONNET
      :value: 'CLAUDE_V3_SONNET'



   .. py:attribute:: CLAUDE_V3_HAIKU
      :value: 'CLAUDE_V3_HAIKU'



   .. py:attribute:: CLAUDE_V3_5_SONNET
      :value: 'CLAUDE_V3_5_SONNET'



   .. py:attribute:: CLAUDE_V3_7_SONNET
      :value: 'CLAUDE_V3_7_SONNET'



   .. py:attribute:: CLAUDE_V3_5_HAIKU
      :value: 'CLAUDE_V3_5_HAIKU'



   .. py:attribute:: GEMINI_1_5_PRO
      :value: 'GEMINI_1_5_PRO'



   .. py:attribute:: GEMINI_2_FLASH
      :value: 'GEMINI_2_FLASH'



   .. py:attribute:: GEMINI_2_FLASH_THINKING
      :value: 'GEMINI_2_FLASH_THINKING'



   .. py:attribute:: GEMINI_2_PRO
      :value: 'GEMINI_2_PRO'



   .. py:attribute:: ABACUS_SMAUG3
      :value: 'ABACUS_SMAUG3'



   .. py:attribute:: ABACUS_DRACARYS
      :value: 'ABACUS_DRACARYS'



   .. py:attribute:: QWEN_2_5_32B
      :value: 'QWEN_2_5_32B'



   .. py:attribute:: QWEN_2_5_32B_BASE
      :value: 'QWEN_2_5_32B_BASE'



   .. py:attribute:: QWEN_2_5_72B
      :value: 'QWEN_2_5_72B'



   .. py:attribute:: QWQ_32B
      :value: 'QWQ_32B'



   .. py:attribute:: GEMINI_1_5_FLASH
      :value: 'GEMINI_1_5_FLASH'



   .. py:attribute:: XAI_GROK
      :value: 'XAI_GROK'



   .. py:attribute:: DEEPSEEK_V3
      :value: 'DEEPSEEK_V3'



   .. py:attribute:: DEEPSEEK_R1
      :value: 'DEEPSEEK_R1'



.. py:class:: MonitorAlertType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: ACCURACY_BELOW_THRESHOLD
      :value: 'AccuracyBelowThreshold'



   .. py:attribute:: FEATURE_DRIFT
      :value: 'FeatureDrift'



   .. py:attribute:: DATA_INTEGRITY_VIOLATIONS
      :value: 'DataIntegrityViolations'



   .. py:attribute:: BIAS_VIOLATIONS
      :value: 'BiasViolations'



   .. py:attribute:: HISTORY_LENGTH_DRIFT
      :value: 'HistoryLengthDrift'



   .. py:attribute:: TARGET_DRIFT
      :value: 'TargetDrift'



   .. py:attribute:: PREDICTION_COUNT
      :value: 'PredictionCount'



.. py:class:: FeatureDriftType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: KL
      :value: 'kl'



   .. py:attribute:: KS
      :value: 'ks'



   .. py:attribute:: WS
      :value: 'ws'



   .. py:attribute:: JS
      :value: 'js'



   .. py:attribute:: PSI
      :value: 'psi'



   .. py:attribute:: CHI_SQUARE
      :value: 'chi_square'



   .. py:attribute:: CSI
      :value: 'csi'



.. py:class:: DataIntegrityViolationType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: NULL_VIOLATIONS
      :value: 'null_violations'



   .. py:attribute:: RANGE_VIOLATIONS
      :value: 'range_violations'



   .. py:attribute:: CATEGORICAL_RANGE_VIOLATION
      :value: 'categorical_range_violations'



   .. py:attribute:: TOTAL_VIOLATIONS
      :value: 'total_violations'



.. py:class:: BiasType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: DEMOGRAPHIC_PARITY
      :value: 'demographic_parity'



   .. py:attribute:: EQUAL_OPPORTUNITY
      :value: 'equal_opportunity'



   .. py:attribute:: GROUP_BENEFIT_EQUALITY
      :value: 'group_benefit'



   .. py:attribute:: TOTAL
      :value: 'total'



.. py:class:: AlertActionType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: EMAIL
      :value: 'Email'



.. py:class:: PythonFunctionType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: FEATURE_GROUP
      :value: 'FEATURE_GROUP'



   .. py:attribute:: PLOTLY_FIG
      :value: 'PLOTLY_FIG'



   .. py:attribute:: STEP_FUNCTION
      :value: 'STEP_FUNCTION'



   .. py:attribute:: USERCODE_TOOL
      :value: 'USERCODE_TOOL'



   .. py:attribute:: CONNECTOR_TOOL
      :value: 'CONNECTOR_TOOL'



.. py:class:: EvalArtifactType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: FORECASTING_ACCURACY
      :value: 'bar_chart'



   .. py:attribute:: FORECASTING_VOLUME
      :value: 'bar_chart_volume'



   .. py:attribute:: FORECASTING_HISTORY_LENGTH_ACCURACY
      :value: 'bar_chart_accuracy_by_history'



.. py:class:: FieldDescriptorType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: STRING
      :value: 'STRING'



   .. py:attribute:: INTEGER
      :value: 'INTEGER'



   .. py:attribute:: FLOAT
      :value: 'FLOAT'



   .. py:attribute:: BOOLEAN
      :value: 'BOOLEAN'



   .. py:attribute:: DATETIME
      :value: 'DATETIME'



   .. py:attribute:: DATE
      :value: 'DATE'



.. py:class:: WorkflowNodeInputType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: USER_INPUT
      :value: 'USER_INPUT'



   .. py:attribute:: WORKFLOW_VARIABLE
      :value: 'WORKFLOW_VARIABLE'



   .. py:attribute:: IGNORE
      :value: 'IGNORE'



   .. py:attribute:: CONSTANT
      :value: 'CONSTANT'



.. py:class:: WorkflowNodeOutputType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: ATTACHMENT
      :value: 'ATTACHMENT'



   .. py:attribute:: BOOLEAN
      :value: 'BOOLEAN'



   .. py:attribute:: FLOAT
      :value: 'FLOAT'



   .. py:attribute:: INTEGER
      :value: 'INTEGER'



   .. py:attribute:: DICT
      :value: 'DICT'



   .. py:attribute:: LIST
      :value: 'LIST'



   .. py:attribute:: STRING
      :value: 'STRING'



   .. py:attribute:: RUNTIME_SCHEMA
      :value: 'RUNTIME_SCHEMA'



   .. py:attribute:: ANY
      :value: 'ANY'



   .. py:method:: normalize_type(python_type)
      :classmethod:



.. py:class:: StdDevThresholdType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: ABSOLUTE
      :value: 'ABSOLUTE'



   .. py:attribute:: PERCENTILE
      :value: 'PERCENTILE'



   .. py:attribute:: STDDEV
      :value: 'STDDEV'



.. py:class:: DataType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: INTEGER
      :value: 'integer'



   .. py:attribute:: FLOAT
      :value: 'float'



   .. py:attribute:: STRING
      :value: 'string'



   .. py:attribute:: DATE
      :value: 'date'



   .. py:attribute:: DATETIME
      :value: 'datetime'



   .. py:attribute:: BOOLEAN
      :value: 'boolean'



   .. py:attribute:: LIST
      :value: 'list'



   .. py:attribute:: STRUCT
      :value: 'struct'



   .. py:attribute:: NULL
      :value: 'null'



   .. py:attribute:: BINARY
      :value: 'binary'



.. py:class:: AgentInterface

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: DEFAULT
      :value: 'DEFAULT'



   .. py:attribute:: CHAT
      :value: 'CHAT'



   .. py:attribute:: MATRIX
      :value: 'MATRIX'



   .. py:attribute:: AUTONOMOUS
      :value: 'AUTONOMOUS'



.. py:class:: WorkflowNodeTemplateType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: TRIGGER
      :value: 'trigger'



   .. py:attribute:: DEFAULT
      :value: 'default'



.. py:class:: ProjectConfigType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: CONSTRAINTS
      :value: 'CONSTRAINTS'



   .. py:attribute:: CHAT_FEEDBACK
      :value: 'CHAT_FEEDBACK'



   .. py:attribute:: REVIEW_MODE
      :value: 'REVIEW_MODE'



.. py:class:: CPUSize

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: SMALL
      :value: 'small'



   .. py:attribute:: MEDIUM
      :value: 'medium'



   .. py:attribute:: LARGE
      :value: 'large'



.. py:class:: MemorySize

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: SMALL
      :value: 16



   .. py:attribute:: MEDIUM
      :value: 32



   .. py:attribute:: LARGE
      :value: 64



   .. py:attribute:: XLARGE
      :value: 128



   .. py:method:: from_value(value)
      :classmethod:



.. py:class:: ResponseSectionType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: AGENT_FLOW_BUTTON
      :value: 'agent_flow_button'



   .. py:attribute:: ATTACHMENTS
      :value: 'attachments'



   .. py:attribute:: BASE64_IMAGE
      :value: 'base64_image'



   .. py:attribute:: CHART
      :value: 'chart'



   .. py:attribute:: CODE
      :value: 'code'



   .. py:attribute:: COLLAPSIBLE_COMPONENT
      :value: 'collapsible_component'



   .. py:attribute:: IMAGE_URL
      :value: 'image_url'



   .. py:attribute:: RUNTIME_SCHEMA
      :value: 'runtime_schema'



   .. py:attribute:: LIST
      :value: 'list'



   .. py:attribute:: TABLE
      :value: 'table'



   .. py:attribute:: TEXT
      :value: 'text'



.. py:class:: CodeLanguage

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: PYTHON
      :value: 'python'



   .. py:attribute:: SQL
      :value: 'sql'



.. py:class:: DeploymentConversationType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: CHAT_LLM
      :value: 'CHATLLM'



   .. py:attribute:: SIMPLE_AGENT
      :value: 'SIMPLE_AGENT'



   .. py:attribute:: COMPLEX_AGENT
      :value: 'COMPLEX_AGENT'



   .. py:attribute:: WORKFLOW_AGENT
      :value: 'WORKFLOW_AGENT'



   .. py:attribute:: COPILOT
      :value: 'COPILOT'



   .. py:attribute:: AGENT_CONTROLLER
      :value: 'AGENT_CONTROLLER'



   .. py:attribute:: CODE_LLM
      :value: 'CODE_LLM'



   .. py:attribute:: CODE_LLM_AGENT
      :value: 'CODE_LLM_AGENT'



   .. py:attribute:: CHAT_LLM_TASK
      :value: 'CHAT_LLM_TASK'



   .. py:attribute:: COMPUTER_AGENT
      :value: 'COMPUTER_AGENT'



   .. py:attribute:: SEARCH_LLM
      :value: 'SEARCH_LLM'



   .. py:attribute:: APP_LLM
      :value: 'APP_LLM'



   .. py:attribute:: TEST_AGENT
      :value: 'TEST_AGENT'



.. py:class:: AgentClientType

   Bases: :py:obj:`ApiEnum`


   Generic enumeration.

   Derive from this class to define new enumerations.


   .. py:attribute:: CHAT_UI
      :value: 'CHAT_UI'



   .. py:attribute:: MESSAGING_APP
      :value: 'MESSAGING_APP'



   .. py:attribute:: API
      :value: 'API'



.. py:class:: SamplingConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for the sampling config of a feature group


   .. py:attribute:: sampling_method
      :type:  abacusai.api_class.enums.SamplingMethodType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



   .. py:method:: __post_init__()


.. py:class:: NSamplingConfig

   Bases: :py:obj:`SamplingConfig`


   The number of distinct values of the key columns to include in the sample, or the number of rows if key columns are not specified.

   :param sample_count: The number of rows to include in the sample
   :type sample_count: int
   :param key_columns: The feature(s) to use as the key(s) when sampling
   :type key_columns: List[str]
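
   A minimal construction sketch (the column name is illustrative):

   .. code-block:: python

      from abacusai import NSamplingConfig

      # Include 10,000 distinct user_id values in the sample.
      config = NSamplingConfig(sample_count=10000, key_columns=['user_id'])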


   .. py:attribute:: sample_count
      :type:  int


   .. py:attribute:: key_columns
      :type:  List[str]
      :value: []



   .. py:method:: __post_init__()


.. py:class:: PercentSamplingConfig

   Bases: :py:obj:`SamplingConfig`


   The fraction of distinct values of the feature group to include in the sample.

   :param sample_percent: The percentage of the rows to sample
   :type sample_percent: float
   :param key_columns: The feature(s) to use as the key(s) when sampling
   :type key_columns: List[str]
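
   A minimal construction sketch (the column name is illustrative; 0.2 is shown purely as an example value for ``sample_percent``):

   .. code-block:: python

      from abacusai import PercentSamplingConfig

      # Sample by user_id; see the sample_percent description above for how the value is interpreted.
      config = PercentSamplingConfig(sample_percent=0.2, key_columns=['user_id'])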


   .. py:attribute:: sample_percent
      :type:  float


   .. py:attribute:: key_columns
      :type:  List[str]
      :value: []



   .. py:method:: __post_init__()


.. py:class:: _SamplingConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_class_key
      :value: 'sampling_method'



   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_map


.. py:class:: MergeConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for the merge config of a feature group


   .. py:attribute:: merge_mode
      :type:  abacusai.api_class.enums.MergeMode
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



   .. py:method:: __post_init__()


.. py:class:: LastNMergeConfig

   Bases: :py:obj:`MergeConfig`


   Merge LAST N chunks/versions of an incremental dataset.

   :param num_versions: The number of versions to merge. num_versions == 0 means merge all versions.
   :type num_versions: int
   :param include_version_timestamp_column: If set, include a column with the creation timestamp of source FG versions.
   :type include_version_timestamp_column: bool
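
   A minimal construction sketch:

   .. code-block:: python

      from abacusai import LastNMergeConfig

      # Merge the 10 most recent versions and tag rows with the source version timestamp.
      config = LastNMergeConfig(num_versions=10, include_version_timestamp_column=True)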


   .. py:attribute:: num_versions
      :type:  int


   .. py:attribute:: include_version_timestamp_column
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: TimeWindowMergeConfig

   Bases: :py:obj:`MergeConfig`


   Merge rows within a given time window of the most recent timestamp.

   :param feature_name: Time based column to index on
   :type feature_name: str
   :param time_window_size_ms: Range of merged rows will be [MAX_TIME - time_window_size_ms, MAX_TIME]
   :type time_window_size_ms: int
   :param include_version_timestamp_column: If set, include a column with the creation timestamp of source FG versions.
   :type include_version_timestamp_column: bool
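
   A minimal construction sketch (the column name is illustrative):

   .. code-block:: python

      from abacusai import TimeWindowMergeConfig

      # Keep rows whose event_timestamp falls within 7 days of the most recent timestamp.
      config = TimeWindowMergeConfig(
          feature_name='event_timestamp',
          time_window_size_ms=7 * 24 * 60 * 60 * 1000,
      )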


   .. py:attribute:: feature_name
      :type:  str


   .. py:attribute:: time_window_size_ms
      :type:  int


   .. py:attribute:: include_version_timestamp_column
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: _MergeConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_class_key
      :value: 'merge_mode'



   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_map


.. py:class:: OperatorConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Configuration for a template Feature Group Operation


   .. py:attribute:: operator_type
      :type:  abacusai.api_class.enums.OperatorType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



   .. py:method:: __post_init__()


.. py:class:: UnpivotConfig

   Bases: :py:obj:`OperatorConfig`


   Unpivot Columns in a FeatureGroup.

   :param columns: Which columns to unpivot.
   :type columns: List[str]
   :param index_column: Name of new column containing the unpivoted column names as its values
   :type index_column: str
   :param value_column: Name of new column containing the row values that were unpivoted.
   :type value_column: str
   :param exclude: If True, the unpivoted columns are all the columns EXCEPT the ones in the columns argument. Default is False.
   :type exclude: bool
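
   A minimal construction sketch (column names are illustrative):

   .. code-block:: python

      from abacusai import UnpivotConfig

      # Turn the monthly sales columns into (month, sales) rows.
      config = UnpivotConfig(
          columns=['sales_jan', 'sales_feb', 'sales_mar'],
          index_column='month',
          value_column='sales',
          exclude=False,
      )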


   .. py:attribute:: columns
      :type:  List[str]
      :value: None



   .. py:attribute:: index_column
      :type:  str
      :value: None



   .. py:attribute:: value_column
      :type:  str
      :value: None



   .. py:attribute:: exclude
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: MarkdownConfig

   Bases: :py:obj:`OperatorConfig`


   Transform an input column to a markdown column.

   :param input_column: Name of input column to transform.
   :type input_column: str
   :param output_column: Name of output column to store transformed data.
   :type output_column: str
   :param input_column_type: Type of input column to transform.
   :type input_column_type: MarkdownOperatorInputType


   .. py:attribute:: input_column
      :type:  str
      :value: None



   .. py:attribute:: output_column
      :type:  str
      :value: None



   .. py:attribute:: input_column_type
      :type:  abacusai.api_class.enums.MarkdownOperatorInputType
      :value: None



   .. py:method:: __post_init__()


.. py:class:: CrawlerTransformConfig

   Bases: :py:obj:`OperatorConfig`


   Transform an input column of URLs to HTML text.

   :param input_column: Name of input column to transform.
   :type input_column: str
   :param output_column: Name of output column to store transformed data.
   :type output_column: str
   :param depth_column: Column controlling the crawl depth; increasing the depth explores more links, capturing more content.
   :type depth_column: str
   :param disable_host_restriction: If True, will not restrict crawling to the same host.
   :type disable_host_restriction: bool
   :param honour_website_rules: If True, will respect robots.txt rules.
   :type honour_website_rules: bool
   :param user_agent: If provided, will use this user agent instead of randomly selecting one.
   :type user_agent: str
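
   A minimal construction sketch (column names are illustrative):

   .. code-block:: python

      from abacusai import CrawlerTransformConfig

      # Crawl the URLs in the `url` column and store the fetched HTML text in `page_html`.
      config = CrawlerTransformConfig(
          input_column='url',
          output_column='page_html',
          honour_website_rules=True,
          disable_host_restriction=False,
      )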


   .. py:attribute:: input_column
      :type:  str
      :value: None



   .. py:attribute:: output_column
      :type:  str
      :value: None



   .. py:attribute:: depth_column
      :type:  str
      :value: None



   .. py:attribute:: input_column_type
      :type:  str
      :value: None



   .. py:attribute:: crawl_depth
      :type:  int
      :value: None



   .. py:attribute:: disable_host_restriction
      :type:  bool
      :value: None



   .. py:attribute:: honour_website_rules
      :type:  bool
      :value: None



   .. py:attribute:: user_agent
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ExtractDocumentDataConfig

   Bases: :py:obj:`OperatorConfig`


   Extracts data from documents.

   :param doc_id_column: Name of input document ID column.
   :type doc_id_column: str
   :param document_column: Name of the input document column which contains the page information. This column will be transformed to include the document processing config in the output feature group.
   :type document_column: str
   :param document_processing_config: Document processing configuration.
   :type document_processing_config: DocumentProcessingConfig


   .. py:attribute:: doc_id_column
      :type:  str
      :value: None



   .. py:attribute:: document_column
      :type:  str
      :value: None



   .. py:attribute:: document_processing_config
      :type:  abacusai.api_class.dataset.DocumentProcessingConfig
      :value: None



   .. py:method:: __post_init__()


.. py:class:: DataGenerationConfig

   Bases: :py:obj:`OperatorConfig`


   Generate synthetic data using a model for finetuning an LLM.

   :param prompt_col: Name of the input prompt column.
   :type prompt_col: str
   :param completion_col: Name of the output completion column.
   :type completion_col: str
   :param description_col: Name of the description column.
   :type description_col: str
   :param id_col: Name of the identifier column.
   :type id_col: str
   :param generation_instructions: Instructions for the data generation model.
   :type generation_instructions: str
   :param temperature: Sampling temperature for the model.
   :type temperature: float
   :param fewshot_examples: Number of fewshot examples used to prompt the model.
   :type fewshot_examples: int
   :param concurrency: Number of concurrent processes.
   :type concurrency: int
   :param examples_per_target: Number of examples per target.
   :type examples_per_target: int
   :param subset_size: Size of the subset to use for generation.
   :type subset_size: Optional[int]
   :param verify_response: Whether to verify the response.
   :type verify_response: bool
   :param token_budget: Token budget for generation.
   :type token_budget: int
   :param oversample: Whether to oversample the data.
   :type oversample: bool
   :param documentation_char_limit: Character limit for documentation.
   :type documentation_char_limit: int
   :param frequency_penalty: Penalty for frequency of token appearance.
   :type frequency_penalty: float
   :param model: Model to use for data generation.
   :type model: str
   :param seed: Seed for random number generation.
   :type seed: Optional[int]
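
   A minimal construction sketch (column names and generation settings are illustrative):

   .. code-block:: python

      from abacusai import DataGenerationConfig

      config = DataGenerationConfig(
          prompt_col='prompt',
          completion_col='completion',
          generation_instructions='Paraphrase each prompt and write a matching completion.',
          temperature=0.7,
          fewshot_examples=3,
          examples_per_target=5,
          verify_response=True,
          seed=42,
      )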


   .. py:attribute:: prompt_col
      :type:  str
      :value: None



   .. py:attribute:: completion_col
      :type:  str
      :value: None



   .. py:attribute:: description_col
      :type:  str
      :value: None



   .. py:attribute:: id_col
      :type:  str
      :value: None



   .. py:attribute:: generation_instructions
      :type:  str
      :value: None



   .. py:attribute:: temperature
      :type:  float
      :value: None



   .. py:attribute:: fewshot_examples
      :type:  int
      :value: None



   .. py:attribute:: concurrency
      :type:  int
      :value: None



   .. py:attribute:: examples_per_target
      :type:  int
      :value: None



   .. py:attribute:: subset_size
      :type:  int
      :value: None



   .. py:attribute:: verify_response
      :type:  bool
      :value: None



   .. py:attribute:: token_budget
      :type:  int
      :value: None



   .. py:attribute:: oversample
      :type:  bool
      :value: None



   .. py:attribute:: documentation_char_limit
      :type:  int
      :value: None



   .. py:attribute:: frequency_penalty
      :type:  float
      :value: None



   .. py:attribute:: model
      :type:  str
      :value: None



   .. py:attribute:: seed
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: UnionTransformConfig

   Bases: :py:obj:`OperatorConfig`


   Takes the union of the current feature group with one or more selected feature groups of the same type.

   :param feature_group_ids: List of feature group IDs to union with source FG.
   :type feature_group_ids: List[str]
   :param drop_non_intersecting_columns: If True, will drop columns that are not present in all feature groups. If False, missing columns are filled with nulls.
   :type drop_non_intersecting_columns: bool
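
   A minimal construction sketch (the feature group IDs are placeholders):

   .. code-block:: python

      from abacusai import UnionTransformConfig

      # Union the source feature group with two others, keeping only columns shared by all of them.
      config = UnionTransformConfig(
          feature_group_ids=['fg_id_1', 'fg_id_2'],
          drop_non_intersecting_columns=True,
      )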


   .. py:attribute:: feature_group_ids
      :type:  List[str]
      :value: None



   .. py:attribute:: drop_non_intersecting_columns
      :type:  bool
      :value: False



   .. py:method:: __post_init__()


.. py:class:: _OperatorConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   A class to select and return the correct type of Operator Config based on a serialized OperatorConfig instance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'operator_type'



   .. py:attribute:: config_class_map


.. py:class:: TrainingConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for the training config options used to train the model.


   .. py:attribute:: _upper_snake_case_keys
      :type:  bool
      :value: True



   .. py:attribute:: _support_kwargs
      :type:  bool
      :value: True



   .. py:attribute:: kwargs
      :type:  dict


   .. py:attribute:: problem_type
      :type:  abacusai.api_class.enums.ProblemType
      :value: None



   .. py:attribute:: algorithm
      :type:  str
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: PersonalizationTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the PERSONALIZATION problem type

   :param objective: Ranking scheme used to select final best model.
   :type objective: PersonalizationObjective
   :param sort_objective: Ranking scheme used to sort models on the metrics page.
   :type sort_objective: PersonalizationObjective
   :param training_mode: Whether to train in production or experimental mode. Defaults to EXP.
   :type training_mode: PersonalizationTrainingMode
   :param target_action_types: List of action types to use as targets for training.
   :type target_action_types: List[str]
   :param target_action_weights: Dictionary of action types to weights for training.
   :type target_action_weights: Dict[str, float]
   :param session_event_types: List of event types to treat as occurrences of sessions.
   :type session_event_types: List[str]
   :param test_split: Percent of dataset to use for test data. We support using between 6% and 20% of your dataset as test data.
   :type test_split: int
   :param recent_days_for_training: Limit training data to a certain latest number of days.
   :type recent_days_for_training: int
   :param training_start_date: Only consider training interaction data after this date. Specified in the timezone of the dataset.
   :type training_start_date: str
   :param test_on_user_split: Use user splits instead of using time splits, when validating and testing the model.
   :type test_on_user_split: bool
   :param test_split_on_last_k_items: Use last k items instead of global timestamp splits, when validating and testing the model.
   :type test_split_on_last_k_items: bool
   :param test_last_items_length: Number of items to leave out for each user when using leave k out folds.
   :type test_last_items_length: int
   :param test_window_length_hours: Duration (in hours) of most recent time window to use when validating and testing the model.
   :type test_window_length_hours: int
   :param explicit_time_split: Sets an explicit time-based test boundary.
   :type explicit_time_split: bool
   :param test_row_indicator: Column indicating which rows to use for training (TRAIN), validation (VAL) and testing (TEST).
   :type test_row_indicator: str
   :param full_data_retraining: Train models separately with all the data.
   :type full_data_retraining: bool
   :param sequential_training: Train a model sequentially through time.
   :type sequential_training: bool
   :param data_split_feature_group_table_name: Specify the table name of the feature group to export training data with the fold column.
   :type data_split_feature_group_table_name: str
   :param optimized_event_type: The final event type to optimize for and compute metrics on.
   :type optimized_event_type: str
   :param dropout_rate: Dropout rate for neural network.
   :type dropout_rate: int
   :param batch_size: Batch size for neural network.
   :type batch_size: BatchSize
   :param disable_transformer: Disable training the transformer algorithm.
   :type disable_transformer: bool
   :param disable_gpu: Disable training on GPU.
   :type disable_gpu: bool
   :param filter_history: Do not recommend items the user has already interacted with.
   :type filter_history: bool
   :param action_types_exclusion_days: Mapping from action type to number of days for which we exclude previously interacted items from prediction
   :type action_types_exclusion_days: Dict[str, float]
   :param session_dedupe_mins: Minimum number of minutes between two sessions for a user.
   :type session_dedupe_mins: float
   :param max_history_length: Maximum length of user-item history to include user in training examples.
   :type max_history_length: int
   :param compute_rerank_metrics: Compute metrics based on rerank results.
   :type compute_rerank_metrics: bool
   :param add_time_features: Include interaction time as a feature.
   :type add_time_features: bool
   :param disable_timestamp_scalar_features: Exclude timestamp scalar features.
   :type disable_timestamp_scalar_features: bool
   :param compute_session_metrics: Evaluate models based on how well they are able to predict the next session of interactions.
   :type compute_session_metrics: bool
   :param max_user_history_len_percentile: Filter out users with history length above this percentile.
   :type max_user_history_len_percentile: int
   :param downsample_item_popularity_percentile: Downsample items more popular than this percentile.
   :type downsample_item_popularity_percentile: float
   :param use_user_id_feature: Use user id as a feature in CTR models.
   :type use_user_id_feature: bool
   :param min_item_history: Minimum number of interactions an item must have to be included in training.
   :type min_item_history: int
   :param query_column: Name of column in the interactions table that represents a natural language query, e.g. 'blue t-shirt'.
   :type query_column: str
   :param item_query_column: Name of column in the item catalog that will be matched to the query column in the interactions table.
   :type item_query_column: str
   :param include_item_id_feature: Add Item-Id to the input features of the model. Applicable for Embedding distance and CTR models.
   :type include_item_id_feature: bool
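
   Below is a minimal construction sketch using a handful of the options above; the action types and numeric values are illustrative placeholders, not recommended settings. Such a config is typically passed to the model-training call for this problem type.

   .. code-block:: python

      from abacusai import PersonalizationTrainingConfig

      config = PersonalizationTrainingConfig(
          target_action_types=['purchase', 'add_to_cart'],  # placeholder action types
          test_split=10,                                    # hold out 10% of the data for testing
          recent_days_for_training=365,
          filter_history=True,                              # do not recommend already-seen items
      )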


   .. py:attribute:: objective
      :type:  abacusai.api_class.enums.PersonalizationObjective
      :value: None



   .. py:attribute:: sort_objective
      :type:  abacusai.api_class.enums.PersonalizationObjective
      :value: None



   .. py:attribute:: training_mode
      :type:  abacusai.api_class.enums.PersonalizationTrainingMode
      :value: None



   .. py:attribute:: target_action_types
      :type:  List[str]
      :value: None



   .. py:attribute:: target_action_weights
      :type:  Dict[str, float]
      :value: None



   .. py:attribute:: session_event_types
      :type:  List[str]
      :value: None



   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: recent_days_for_training
      :type:  int
      :value: None



   .. py:attribute:: training_start_date
      :type:  str
      :value: None



   .. py:attribute:: test_on_user_split
      :type:  bool
      :value: None



   .. py:attribute:: test_split_on_last_k_items
      :type:  bool
      :value: None



   .. py:attribute:: test_last_items_length
      :type:  int
      :value: None



   .. py:attribute:: test_window_length_hours
      :type:  int
      :value: None



   .. py:attribute:: explicit_time_split
      :type:  bool
      :value: None



   .. py:attribute:: test_row_indicator
      :type:  str
      :value: None



   .. py:attribute:: full_data_retraining
      :type:  bool
      :value: None



   .. py:attribute:: sequential_training
      :type:  bool
      :value: None



   .. py:attribute:: data_split_feature_group_table_name
      :type:  str
      :value: None



   .. py:attribute:: optimized_event_type
      :type:  str
      :value: None



   .. py:attribute:: dropout_rate
      :type:  int
      :value: None



   .. py:attribute:: batch_size
      :type:  abacusai.api_class.enums.BatchSize
      :value: None



   .. py:attribute:: disable_transformer
      :type:  bool
      :value: None



   .. py:attribute:: disable_gpu
      :type:  bool
      :value: None



   .. py:attribute:: filter_history
      :type:  bool
      :value: None



   .. py:attribute:: action_types_exclusion_days
      :type:  Dict[str, float]
      :value: None



   .. py:attribute:: max_history_length
      :type:  int
      :value: None



   .. py:attribute:: compute_rerank_metrics
      :type:  bool
      :value: None



   .. py:attribute:: add_time_features
      :type:  bool
      :value: None



   .. py:attribute:: disable_timestamp_scalar_features
      :type:  bool
      :value: None



   .. py:attribute:: compute_session_metrics
      :type:  bool
      :value: None



   .. py:attribute:: query_column
      :type:  str
      :value: None



   .. py:attribute:: item_query_column
      :type:  str
      :value: None



   .. py:attribute:: use_user_id_feature
      :type:  bool
      :value: None



   .. py:attribute:: session_dedupe_mins
      :type:  float
      :value: None



   .. py:attribute:: include_item_id_feature
      :type:  bool
      :value: None



   .. py:attribute:: max_user_history_len_percentile
      :type:  int
      :value: None



   .. py:attribute:: downsample_item_popularity_percentile
      :type:  float
      :value: None



   .. py:attribute:: min_item_history
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: RegressionTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the PREDICTIVE_MODELING problem type

   :param objective: Ranking scheme used to select final best model.
   :type objective: RegressionObjective
   :param sort_objective: Ranking scheme used to sort models on the metrics page.
   :type sort_objective: RegressionObjective
   :param tree_hpo_mode: Turning off Rapid Experimentation will take longer to train.
   :type tree_hpo_mode: RegressionTreeHPOMode
   :param type_of_split: Type of data splitting into train/test (validation also).
   :type type_of_split: RegressionTypeOfSplit
   :param test_split: Percent of dataset to use for test data. We support using between 5% and 20% of your dataset as test data.
   :type test_split: int
   :param disable_test_val_fold: Do not create a TEST_VAL set. All records which would be part of the TEST_VAL fold otherwise, remain in the TEST fold.
   :type disable_test_val_fold: bool
   :param k_fold_cross_validation: Use this to force k-fold cross validation bagging on or off.
   :type k_fold_cross_validation: bool
   :param num_cv_folds: Specify the value of k in k-fold cross validation.
   :type num_cv_folds: int
   :param timestamp_based_splitting_column: Timestamp column selected for splitting into test and train.
   :type timestamp_based_splitting_column: str
   :param timestamp_based_splitting_method: Method of selecting TEST set, top percentile wise or after a given timestamp.
   :type timestamp_based_splitting_method: RegressionTimeSplitMethod
   :param test_splitting_timestamp: Rows with timestamp greater than this will be considered to be in the test set.
   :type test_splitting_timestamp: str
   :param sampling_unit_keys: Constrain train/test separation to partition a column.
   :type sampling_unit_keys: List[str]
   :param test_row_indicator: Column indicating which rows to use for training (TRAIN) and testing (TEST). Validation (VAL) can also be specified.
   :type test_row_indicator: str
   :param full_data_retraining: Train models separately with all the data.
   :type full_data_retraining: bool
   :param rebalance_classes: Class weights are computed as the inverse of the class frequency from the training dataset when this option is selected as "Yes". It is useful when the classes in the dataset are unbalanced.
                             Re-balancing classes generally boosts recall at the cost of precision on rare classes.
   :type rebalance_classes: bool
   :param rare_class_augmentation_threshold: Augments any rare class whose relative frequency with respect to the most frequent class is less than this threshold. Default = 0.1 for classification problems with rare classes.
   :type rare_class_augmentation_threshold: float
   :param augmentation_strategy: Strategy to deal with class imbalance and data augmentation.
   :type augmentation_strategy: RegressionAugmentationStrategy
   :param training_rows_downsample_ratio: Uses this ratio to train on a sample of the dataset provided.
   :type training_rows_downsample_ratio: float
   :param active_labels_column: Specify a column to use as the active columns in a multi label setting.
   :type active_labels_column: str
   :param min_categorical_count: Minimum threshold to consider a value different from the unknown placeholder.
   :type min_categorical_count: int
   :param sample_weight: Specify a column to use as the weight of a sample for training and eval.
   :type sample_weight: str
   :param numeric_clipping_percentile: Uses this option to clip the top and bottom x percentile of numeric feature columns where x is the value of this option.
   :type numeric_clipping_percentile: float
   :param target_transform: Specify a transform (e.g. log, quantile) to apply to the target variable.
   :type target_transform: RegressionTargetTransform
   :param ignore_datetime_features: Remove all datetime features from the model. Useful while generalizing to different time periods.
   :type ignore_datetime_features: bool
   :param max_text_words: Maximum number of words to use from text fields.
   :type max_text_words: int
   :param perform_feature_selection: If enabled, additional algorithms which support feature selection as a pretraining step will be trained separately with the selected subset of features. The details about their selected features can be found in their respective logs.
   :type perform_feature_selection: bool
   :param feature_selection_intensity: This determines the strictness with which features will be filtered out. 1 being very lenient (more features kept), 100 being very strict.
   :type feature_selection_intensity: int
   :param batch_size: Batch size.
   :type batch_size: BatchSize
   :param dropout_rate: Dropout percentage rate.
   :type dropout_rate: int
   :param pretrained_model_name: Enable algorithms which process text using pretrained multilingual NLP models.
   :type pretrained_model_name: str
   :param pretrained_llm_name: Enable algorithms which process text using pretrained large language models.
   :type pretrained_llm_name: str
   :param is_multilingual: Enable algorithms which process text using pretrained multilingual NLP models.
   :type is_multilingual: bool
   :param loss_function: Loss function to be used as objective for model training.
   :type loss_function: RegressionLossFunction
   :param loss_parameters: Loss function params in format <key>=<value>;<key>=<value>;.....
   :type loss_parameters: str
   :param target_encode_categoricals: Use this to turn target encoding on categorical features on or off.
   :type target_encode_categoricals: bool
   :param drop_original_categoricals: Whether to also feed the original label-encoded categorical columns to the models along with their target-encoded versions.
   :type drop_original_categoricals: bool
   :param monotonically_increasing_features: Constrain the model such that it behaves as if the target feature is monotonically increasing with the selected features
   :type monotonically_increasing_features: List[str]
   :param monotonically_decreasing_features: Constrain the model such that it behaves as if the target feature is monotonically decreasing with the selected features
   :type monotonically_decreasing_features: List[str]
   :param data_split_feature_group_table_name: Specify the table name of the feature group to export training data with the fold column.
   :type data_split_feature_group_table_name: str
   :param custom_loss_functions: Registered custom losses available for selection.
   :type custom_loss_functions: List[str]
   :param custom_metrics: Registered custom metrics available for selection.
   :type custom_metrics: List[str]
   :param partial_dependence_analysis: Specify whether to run partial dependence plots for all features or only some features.
   :type partial_dependence_analysis: PartialDependenceAnalysis
   :param do_masked_language_model_pretraining: Specify whether to run a masked language model unsupervised pretraining step before supervised training in certain supported algorithms which use BERT-like backbones.
   :type do_masked_language_model_pretraining: bool
   :param max_tokens_in_sentence: Specify the max tokens to be kept in a sentence based on the truncation strategy.
   :type max_tokens_in_sentence: int
   :param truncation_strategy: What strategy to use to deal with text rows with more than a given number of tokens (if num of tokens is more than "max_tokens_in_sentence").
   :type truncation_strategy: str
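
   Below is a minimal construction sketch; the column and feature names are illustrative placeholders, not defaults.

   .. code-block:: python

      from abacusai import RegressionTrainingConfig

      config = RegressionTrainingConfig(
          test_split=10,
          rebalance_classes=True,                      # inverse-frequency class weights
          sample_weight='row_weight',                  # placeholder weight column name
          monotonically_increasing_features=['age'],   # placeholder feature name
      )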


   .. py:attribute:: objective
      :type:  abacusai.api_class.enums.RegressionObjective
      :value: None



   .. py:attribute:: sort_objective
      :type:  abacusai.api_class.enums.RegressionObjective
      :value: None



   .. py:attribute:: tree_hpo_mode
      :type:  abacusai.api_class.enums.RegressionTreeHPOMode
      :value: None



   .. py:attribute:: partial_dependence_analysis
      :type:  abacusai.api_class.enums.PartialDependenceAnalysis
      :value: None



   .. py:attribute:: type_of_split
      :type:  abacusai.api_class.enums.RegressionTypeOfSplit
      :value: None



   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: disable_test_val_fold
      :type:  bool
      :value: None



   .. py:attribute:: k_fold_cross_validation
      :type:  bool
      :value: None



   .. py:attribute:: num_cv_folds
      :type:  int
      :value: None



   .. py:attribute:: timestamp_based_splitting_column
      :type:  str
      :value: None



   .. py:attribute:: timestamp_based_splitting_method
      :type:  abacusai.api_class.enums.RegressionTimeSplitMethod
      :value: None



   .. py:attribute:: test_splitting_timestamp
      :type:  str
      :value: None



   .. py:attribute:: sampling_unit_keys
      :type:  List[str]
      :value: None



   .. py:attribute:: test_row_indicator
      :type:  str
      :value: None



   .. py:attribute:: full_data_retraining
      :type:  bool
      :value: None



   .. py:attribute:: rebalance_classes
      :type:  bool
      :value: None



   .. py:attribute:: rare_class_augmentation_threshold
      :type:  float
      :value: None



   .. py:attribute:: augmentation_strategy
      :type:  abacusai.api_class.enums.RegressionAugmentationStrategy
      :value: None



   .. py:attribute:: training_rows_downsample_ratio
      :type:  float
      :value: None



   .. py:attribute:: active_labels_column
      :type:  str
      :value: None



   .. py:attribute:: min_categorical_count
      :type:  int
      :value: None



   .. py:attribute:: sample_weight
      :type:  str
      :value: None



   .. py:attribute:: numeric_clipping_percentile
      :type:  float
      :value: None



   .. py:attribute:: target_transform
      :type:  abacusai.api_class.enums.RegressionTargetTransform
      :value: None



   .. py:attribute:: ignore_datetime_features
      :type:  bool
      :value: None



   .. py:attribute:: max_text_words
      :type:  int
      :value: None



   .. py:attribute:: perform_feature_selection
      :type:  bool
      :value: None



   .. py:attribute:: feature_selection_intensity
      :type:  int
      :value: None



   .. py:attribute:: batch_size
      :type:  abacusai.api_class.enums.BatchSize
      :value: None



   .. py:attribute:: dropout_rate
      :type:  int
      :value: None



   .. py:attribute:: pretrained_model_name
      :type:  str
      :value: None



   .. py:attribute:: pretrained_llm_name
      :type:  str
      :value: None



   .. py:attribute:: is_multilingual
      :type:  bool
      :value: None



   .. py:attribute:: do_masked_language_model_pretraining
      :type:  bool
      :value: None



   .. py:attribute:: max_tokens_in_sentence
      :type:  int
      :value: None



   .. py:attribute:: truncation_strategy
      :type:  str
      :value: None



   .. py:attribute:: loss_function
      :type:  abacusai.api_class.enums.RegressionLossFunction
      :value: None



   .. py:attribute:: loss_parameters
      :type:  str
      :value: None



   .. py:attribute:: target_encode_categoricals
      :type:  bool
      :value: None



   .. py:attribute:: drop_original_categoricals
      :type:  bool
      :value: None



   .. py:attribute:: monotonically_increasing_features
      :type:  List[str]
      :value: None



   .. py:attribute:: monotonically_decreasing_features
      :type:  List[str]
      :value: None



   .. py:attribute:: data_split_feature_group_table_name
      :type:  str
      :value: None



   .. py:attribute:: custom_loss_functions
      :type:  List[str]
      :value: None



   .. py:attribute:: custom_metrics
      :type:  List[str]
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ForecastingTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the FORECASTING problem type

   :param prediction_length: How many timesteps in the future to predict.
   :type prediction_length: int
   :param objective: Ranking scheme used to select final best model.
   :type objective: ForecastingObjective
   :param sort_objective: Ranking scheme used to sort models on the metrics page.
   :type sort_objective: ForecastingObjective
   :param forecast_frequency: Forecast frequency.
   :type forecast_frequency: ForecastingFrequency
   :param probability_quantiles: Prediction quantiles.
   :type probability_quantiles: List[float]
   :param force_prediction_length: Force length of test window to be the same as prediction length.
   :type force_prediction_length: bool
   :param filter_items: Filter items with small history and volume.
   :type filter_items: bool
   :param enable_feature_selection: Enable feature selection.
   :type enable_feature_selection: bool
   :param enable_padding: Pad series to the max_date of the dataset
   :type enable_padding: bool
   :param enable_cold_start: Enable cold start forecasting by training/predicting for zero history items.
   :type enable_cold_start: bool
   :param enable_multiple_backtests: Whether to enable multiple backtesting or not.
   :type enable_multiple_backtests: bool
   :param num_backtesting_windows: Total backtesting windows to use for the training.
   :type num_backtesting_windows: int
   :param backtesting_window_step_size: Use this step size to shift backtesting windows for model training.
   :type backtesting_window_step_size: int
   :param full_data_retraining: Train models separately with all the data.
   :type full_data_retraining: bool
   :param additional_forecast_keys: List of categorical columns in the timeseries that can act as multi-identifiers.
   :type additional_forecast_keys: List[str]
   :param experimentation_mode: Selecting Thorough Experimentation will take longer to train.
   :type experimentation_mode: ExperimentationMode
   :param type_of_split: Type of data splitting into train/test.
   :type type_of_split: ForecastingDataSplitType
   :param test_by_item: Partition train/test data by item rather than time if true.
   :type test_by_item: bool
   :param test_start: Limit training data to dates before the given test start.
   :type test_start: str
   :param test_split: Percent of dataset to use for test data. We support using between 5% and 20% of your dataset as test data.
   :type test_split: int
   :param loss_function: Loss function for training neural network.
   :type loss_function: ForecastingLossFunction
   :param underprediction_weight: Weight for underpredictions
   :type underprediction_weight: float
   :param disable_networks_without_analytic_quantiles: Disable neural networks whose quantile functions do not have analytic expressions (e.g., mixture models).
   :type disable_networks_without_analytic_quantiles: bool
   :param initial_learning_rate: Initial learning rate.
   :type initial_learning_rate: float
   :param l2_regularization_factor: L2 regularization factor.
   :type l2_regularization_factor: float
   :param dropout_rate: Dropout percentage rate.
   :type dropout_rate: int
   :param recurrent_layers: Number of recurrent layers to stack in network.
   :type recurrent_layers: int
   :param recurrent_units: Number of units in each recurrent layer.
   :type recurrent_units: int
   :param convolutional_layers: Number of convolutional layers to stack on top of recurrent layers in network.
   :type convolutional_layers: int
   :param convolution_filters: Number of filters in each convolution.
   :type convolution_filters: int
   :param local_scaling_mode: Options to make NN inputs stationary in high dynamic range datasets.
   :type local_scaling_mode: ForecastingLocalScaling
   :param zero_predictor: Include subnetwork to classify points where target equals zero.
   :type zero_predictor: bool
   :param skip_missing: Make the RNN ignore missing entries instead of processing them.
   :type skip_missing: bool
   :param batch_size: Batch size.
   :type batch_size: ForecastingBatchSize
   :param batch_renormalization: Enable batch renormalization between layers.
   :type batch_renormalization: bool
   :param history_length: While training, how much history to consider.
   :type history_length: int
   :param prediction_step_size: Number of future periods to include in objective for each training sample.
   :type prediction_step_size: int
   :param training_point_overlap: Amount of overlap to allow between training samples.
   :type training_point_overlap: float
   :param max_scale_context: Maximum context to use for local scaling.
   :type max_scale_context: int
   :param quantiles_extension_method: Quantile extension method
   :type quantiles_extension_method: ForecastingQuanitlesExtensionMethod
   :param number_of_samples: Number of samples for ancestral simulation
   :type number_of_samples: int
   :param symmetrize_quantiles: Force symmetric quantiles (like in Gaussian distribution)
   :type symmetrize_quantiles: bool
   :param use_log_transforms: Apply logarithmic transformations to input data.
   :type use_log_transforms: bool
   :param smooth_history: Smooth (low pass filter) the timeseries.
   :type smooth_history: float
   :param local_scale_target: Use per training/prediction window target scaling.
   :type local_scale_target: bool
   :param use_clipping: Apply clipping to input data to stabilize the training.
   :type use_clipping: bool
   :param timeseries_weight_column: If set, we use the values in this column from timeseries data to assign time dependent item weights during training and evaluation.
   :type timeseries_weight_column: str
   :param item_attributes_weight_column: If set, we use the values in this column from item attributes data to assign weights to items during training and evaluation.
   :type item_attributes_weight_column: str
   :param use_timeseries_weights_in_objective: If True, we include weights from column set as "TIMESERIES WEIGHT COLUMN" in objective functions.
   :type use_timeseries_weights_in_objective: bool
   :param use_item_weights_in_objective: If True, we include weights from column set as "ITEM ATTRIBUTES WEIGHT COLUMN" in objective functions.
   :type use_item_weights_in_objective: bool
   :param skip_timeseries_weight_scaling: If True, we will avoid normalizing the weights.
   :type skip_timeseries_weight_scaling: bool
   :param timeseries_loss_weight_column: Use value in this column to weight the loss while training.
   :type timeseries_loss_weight_column: str
   :param use_item_id: Include a feature to indicate the item being forecast.
   :type use_item_id: bool
   :param use_all_item_totals: Include as input total target across items.
   :type use_all_item_totals: bool
   :param handle_zeros_as_missing_values: If True, handle zero values in demand as missing data.
   :type handle_zeros_as_missing_values: bool
   :param datetime_holiday_calendars: Holiday calendars to augment training with.
   :type datetime_holiday_calendars: List[HolidayCalendars]
   :param fill_missing_values: Strategy for filling in missing values.
   :type fill_missing_values: List[List[dict]]
   :param enable_clustering: Enable clustering in forecasting.
   :type enable_clustering: bool
   :param data_split_feature_group_table_name: Specify the table name of the feature group to export training data with the fold column.
   :type data_split_feature_group_table_name: str
   :param custom_loss_functions: Registered custom losses available for selection.
   :type custom_loss_functions: List[str]
   :param custom_metrics: Registered custom metrics available for selection.
   :type custom_metrics: List[str]
   :param return_fractional_forecasts: Use this to return fractional forecast values during prediction.
   :type return_fractional_forecasts: bool
   :param allow_training_with_small_history: Allows training with fewer than 100 rows in the dataset.
   :type allow_training_with_small_history: bool
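
   Below is a minimal construction sketch; the prediction length, quantiles and split values are illustrative placeholders, not recommendations.

   .. code-block:: python

      from abacusai import ForecastingTrainingConfig

      config = ForecastingTrainingConfig(
          prediction_length=28,                    # forecast 28 timesteps ahead
          probability_quantiles=[0.1, 0.5, 0.9],
          test_split=10,
          filter_items=True,                       # drop items with little history/volume
      )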


   .. py:attribute:: prediction_length
      :type:  int
      :value: None



   .. py:attribute:: objective
      :type:  abacusai.api_class.enums.ForecastingObjective
      :value: None



   .. py:attribute:: sort_objective
      :type:  abacusai.api_class.enums.ForecastingObjective
      :value: None



   .. py:attribute:: forecast_frequency
      :type:  abacusai.api_class.enums.ForecastingFrequency
      :value: None



   .. py:attribute:: probability_quantiles
      :type:  List[float]
      :value: None



   .. py:attribute:: force_prediction_length
      :type:  bool
      :value: None



   .. py:attribute:: filter_items
      :type:  bool
      :value: None



   .. py:attribute:: enable_feature_selection
      :type:  bool
      :value: None



   .. py:attribute:: enable_padding
      :type:  bool
      :value: None



   .. py:attribute:: enable_cold_start
      :type:  bool
      :value: None



   .. py:attribute:: enable_multiple_backtests
      :type:  bool
      :value: None



   .. py:attribute:: num_backtesting_windows
      :type:  int
      :value: None



   .. py:attribute:: backtesting_window_step_size
      :type:  int
      :value: None



   .. py:attribute:: full_data_retraining
      :type:  bool
      :value: None



   .. py:attribute:: additional_forecast_keys
      :type:  List[str]
      :value: None



   .. py:attribute:: experimentation_mode
      :type:  abacusai.api_class.enums.ExperimentationMode
      :value: None



   .. py:attribute:: type_of_split
      :type:  abacusai.api_class.enums.ForecastingDataSplitType
      :value: None



   .. py:attribute:: test_by_item
      :type:  bool
      :value: None



   .. py:attribute:: test_start
      :type:  str
      :value: None



   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: loss_function
      :type:  abacusai.api_class.enums.ForecastingLossFunction
      :value: None



   .. py:attribute:: underprediction_weight
      :type:  float
      :value: None



   .. py:attribute:: disable_networks_without_analytic_quantiles
      :type:  bool
      :value: None



   .. py:attribute:: initial_learning_rate
      :type:  float
      :value: None



   .. py:attribute:: l2_regularization_factor
      :type:  float
      :value: None



   .. py:attribute:: dropout_rate
      :type:  int
      :value: None



   .. py:attribute:: recurrent_layers
      :type:  int
      :value: None



   .. py:attribute:: recurrent_units
      :type:  int
      :value: None



   .. py:attribute:: convolutional_layers
      :type:  int
      :value: None



   .. py:attribute:: convolution_filters
      :type:  int
      :value: None



   .. py:attribute:: local_scaling_mode
      :type:  abacusai.api_class.enums.ForecastingLocalScaling
      :value: None



   .. py:attribute:: zero_predictor
      :type:  bool
      :value: None



   .. py:attribute:: skip_missing
      :type:  bool
      :value: None



   .. py:attribute:: batch_size
      :type:  abacusai.api_class.enums.BatchSize
      :value: None



   .. py:attribute:: batch_renormalization
      :type:  bool
      :value: None



   .. py:attribute:: history_length
      :type:  int
      :value: None



   .. py:attribute:: prediction_step_size
      :type:  int
      :value: None



   .. py:attribute:: training_point_overlap
      :type:  float
      :value: None



   .. py:attribute:: max_scale_context
      :type:  int
      :value: None



   .. py:attribute:: quantiles_extension_method
      :type:  abacusai.api_class.enums.ForecastingQuanitlesExtensionMethod
      :value: None



   .. py:attribute:: number_of_samples
      :type:  int
      :value: None



   .. py:attribute:: symmetrize_quantiles
      :type:  bool
      :value: None



   .. py:attribute:: use_log_transforms
      :type:  bool
      :value: None



   .. py:attribute:: smooth_history
      :type:  float
      :value: None



   .. py:attribute:: local_scale_target
      :type:  bool
      :value: None



   .. py:attribute:: use_clipping
      :type:  bool
      :value: None



   .. py:attribute:: timeseries_weight_column
      :type:  str
      :value: None



   .. py:attribute:: item_attributes_weight_column
      :type:  str
      :value: None



   .. py:attribute:: use_timeseries_weights_in_objective
      :type:  bool
      :value: None



   .. py:attribute:: use_item_weights_in_objective
      :type:  bool
      :value: None



   .. py:attribute:: skip_timeseries_weight_scaling
      :type:  bool
      :value: None



   .. py:attribute:: timeseries_loss_weight_column
      :type:  str
      :value: None



   .. py:attribute:: use_item_id
      :type:  bool
      :value: None



   .. py:attribute:: use_all_item_totals
      :type:  bool
      :value: None



   .. py:attribute:: handle_zeros_as_missing_values
      :type:  bool
      :value: None



   .. py:attribute:: datetime_holiday_calendars
      :type:  List[abacusai.api_class.enums.HolidayCalendars]
      :value: None



   .. py:attribute:: fill_missing_values
      :type:  List[List[dict]]
      :value: None



   .. py:attribute:: enable_clustering
      :type:  bool
      :value: None



   .. py:attribute:: data_split_feature_group_table_name
      :type:  str
      :value: None



   .. py:attribute:: custom_loss_functions
      :type:  List[str]
      :value: None



   .. py:attribute:: custom_metrics
      :type:  List[str]
      :value: None



   .. py:attribute:: return_fractional_forecasts
      :type:  bool
      :value: None



   .. py:attribute:: allow_training_with_small_history
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: NamedEntityExtractionTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the NAMED_ENTITY_EXTRACTION problem type

   :param llm_for_ner: LLM to use for NER from among the available LLMs.
   :type llm_for_ner: NERForLLM
   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int
   :param test_row_indicator: Column indicating which rows to use for training (TRAIN) and testing (TEST).
   :type test_row_indicator: str
   :param active_labels_column: Entities that have been marked in a particular text
   :type active_labels_column: str
   :param document_format: Format of the input documents.
   :type document_format: NLPDocumentFormat
   :param minimum_bounding_box_overlap_ratio: Tokens are considered to belong to an annotation if the user bounding box is provided and the ratio of (token_bounding_box ∩ annotation_bounding_box) / token_bounding_area is greater than the provided value.
   :type minimum_bounding_box_overlap_ratio: float
   :param save_predicted_pdf: Whether to save predicted PDF documents
   :type save_predicted_pdf: bool
   :param enhanced_ocr: Enhanced text extraction from predicted digital documents
   :type enhanced_ocr: bool
   :param additional_extraction_instructions: Additional instructions to guide the LLM in extracting the entities. Only used with LLM algorithms.
   :type additional_extraction_instructions: str
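
   Below is a minimal construction sketch; the extraction instruction text is an illustrative placeholder.

   .. code-block:: python

      from abacusai import NamedEntityExtractionTrainingConfig

      config = NamedEntityExtractionTrainingConfig(
          test_split=10,
          save_predicted_pdf=True,
          additional_extraction_instructions='Treat invoice numbers as a single entity.',  # placeholder
      )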


   .. py:attribute:: llm_for_ner
      :type:  abacusai.api_class.enums.LLMName
      :value: None



   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: test_row_indicator
      :type:  str
      :value: None



   .. py:attribute:: active_labels_column
      :type:  str
      :value: None



   .. py:attribute:: document_format
      :type:  abacusai.api_class.enums.NLPDocumentFormat
      :value: None



   .. py:attribute:: minimum_bounding_box_overlap_ratio
      :type:  float
      :value: 0.0



   .. py:attribute:: save_predicted_pdf
      :type:  bool
      :value: True



   .. py:attribute:: enhanced_ocr
      :type:  bool
      :value: False



   .. py:attribute:: additional_extraction_instructions
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: NaturalLanguageSearchTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the NATURAL_LANGUAGE_SEARCH problem type

   :param abacus_internal_model: Use an Abacus.AI LLM to answer questions about your data without using any external APIs.
   :type abacus_internal_model: bool
   :param num_completion_tokens: Default for maximum number of tokens for chat answers. Reducing this will get faster responses that are more succinct.
   :type num_completion_tokens: int
   :param larger_embeddings: Use a higher dimension embedding model.
   :type larger_embeddings: bool
   :param search_chunk_size: Chunk size for indexing the documents.
   :type search_chunk_size: int
   :param chunk_overlap_fraction: Overlap in chunks while indexing the documents.
   :type chunk_overlap_fraction: float
   :param index_fraction: Fraction of the chunk to use for indexing.
   :type index_fraction: float
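
   Below is a minimal construction sketch; the chunking values are illustrative placeholders, not defaults.

   .. code-block:: python

      from abacusai import NaturalLanguageSearchTrainingConfig

      config = NaturalLanguageSearchTrainingConfig(
          abacus_internal_model=True,   # answer without calling external LLM APIs
          num_completion_tokens=512,
          search_chunk_size=800,        # placeholder chunk size
          chunk_overlap_fraction=0.1,
      )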


   .. py:attribute:: abacus_internal_model
      :type:  bool
      :value: None



   .. py:attribute:: num_completion_tokens
      :type:  int
      :value: None



   .. py:attribute:: larger_embeddings
      :type:  bool
      :value: None



   .. py:attribute:: search_chunk_size
      :type:  int
      :value: None



   .. py:attribute:: index_fraction
      :type:  float
      :value: None



   .. py:attribute:: chunk_overlap_fraction
      :type:  float
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ChatLLMTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the CHAT_LLM problem type

   :param document_retrievers: List of names or IDs of document retrievers to use as vector stores of information for RAG responses.
   :type document_retrievers: List[str]
   :param num_completion_tokens: Default for maximum number of tokens for chat answers. Reducing this will get faster responses which are more succinct.
   :type num_completion_tokens: int
   :param temperature: The generative LLM temperature.
   :type temperature: float
   :param retrieval_columns: Include the metadata column values in the retrieved search results.
   :type retrieval_columns: list
   :param filter_columns: Allow users to filter the document retrievers on these metadata columns.
   :type filter_columns: list
   :param include_general_knowledge: Allow the LLM to rely not just on RAG search results, but to fall back on general knowledge. Disabled by default.
   :type include_general_knowledge: bool
   :param enable_web_search: Allow the LLM to use Web Search Engines to retrieve information for better results.
   :type enable_web_search: bool
   :param behavior_instructions: Customize the overall behaviour of the model. This controls things like when to execute code (if enabled), write a SQL query, search the web (if enabled), etc.
   :type behavior_instructions: str
   :param response_instructions: Customized instructions for how the model should respond, including the format, persona and tone of the answers.
   :type response_instructions: str
   :param enable_llm_rewrite: If enabled, an LLM will rewrite the RAG queries sent to the document retriever. Disabled by default.
   :type enable_llm_rewrite: bool
   :param column_filtering_instructions: Instructions for a LLM call to automatically generate filter expressions on document metadata to retrieve relevant documents for the conversation.
   :type column_filtering_instructions: str
   :param keyword_requirement_instructions: Instructions for a LLM call to automatically generate keyword requirements to retrieve relevant documents for the conversation.
   :type keyword_requirement_instructions: str
   :param query_rewrite_instructions: Special instructions for the LLM which rewrites the RAG query.
   :type query_rewrite_instructions: str
   :param max_search_results: Maximum number of search results in the retrieval augmentation step. If we know that the questions are likely to have snippets which are easily matched in the documents, then a lower number will help with accuracy.
   :type max_search_results: int
   :param data_feature_group_ids: List of feature group IDs to use to possibly query for the ChatLLM. The created ChatLLM is commonly referred to as DataLLM.
   :type data_feature_group_ids: List[str]
   :param data_prompt_context: Prompt context for the data feature group IDs.
   :type data_prompt_context: str
   :param data_prompt_table_context: Dict of table name and table context pairs to provide table wise context for each structured data table.
   :type data_prompt_table_context: Dict[str, str]
   :param data_prompt_column_context: Dict of 'table_name.column_name' and 'column_context' pairs to provide column context for some selected columns in the selected structured data table. This replaces the default auto-generated information about the column data.
   :type data_prompt_column_context: Dict[str, str]
   :param hide_sql_and_code: When running data queries, this will hide the generated SQL and Code in the response.
   :type hide_sql_and_code: bool
   :param disable_data_summarization: After executing a query, skip the summarization step and reply with only the table and the query that was run.
   :type disable_data_summarization: bool
   :param data_columns_to_ignore: Columns to ignore while encoding information about structured data tables in context for the LLM. A list of strings of format "<table_name>.<column_name>"
   :type data_columns_to_ignore: List[str]
   :param search_score_cutoff: Minimum search score to consider a document as a valid search result.
   :type search_score_cutoff: float
   :param include_bm25_retrieval: Combine BM25 search score with vector search using reciprocal rank fusion.
   :type include_bm25_retrieval: bool
   :param database_connector_id: Database connector ID to use for connecting external database that gives access to structured data to the LLM.
   :type database_connector_id: str
   :param database_connector_tables: List of tables to use from the database connector for the ChatLLM.
   :type database_connector_tables: List[str]
   :param enable_code_execution: Enable python code execution in the ChatLLM. This equips the LLM with a python kernel in which all its code is executed.
   :type enable_code_execution: bool
   :param enable_response_caching: Enable caching of LLM responses to speed up response times and improve reproducibility.
   :type enable_response_caching: bool
   :param unknown_answer_phrase: Fallback response when the LLM can't find an answer.
   :type unknown_answer_phrase: str
   :param enable_tool_bar: Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc.
   :type enable_tool_bar: bool
   :param enable_inline_source_citations: Enable inline citations of the sources in the response.
   :type enable_inline_source_citations: bool
   :param response_format: When set to 'JSON', the LLM will generate a JSON formatted string.
   :type response_format: str
   :param json_response_instructions: Instructions to be followed while generating the json_response if `response_format` is set to "JSON". This can include the schema information if the schema is dynamic and its keys cannot be pre-determined.
   :type json_response_instructions: str
   :param json_response_schema: Specifies the JSON schema that the model should adhere to if `response_format` is set to "JSON". This should be a json-formatted string where each field of the expected schema is mapped to a dictionary containing the fields 'type', 'required' and 'description'. For example - '{"sample_field": {"type": "integer", "required": true, "description": "Sample Field"}}'
   :type json_response_schema: str
   :param mask_pii: Mask PII in the prompts and uploaded documents before sending it to the LLM.
   :type mask_pii: bool
   :param custom_tools: List of custom tool names to be used in the chat.
   :type custom_tools: List[str]
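
   Below is a minimal construction sketch for a RAG chat model; the retriever name, instructions and numeric values are illustrative placeholders, not recommended or default settings.

   .. code-block:: python

      from abacusai import ChatLLMTrainingConfig

      config = ChatLLMTrainingConfig(
          document_retrievers=['contracts_retriever'],  # placeholder retriever name
          num_completion_tokens=1024,
          temperature=0.0,
          enable_inline_source_citations=True,
          behavior_instructions='Prefer answers grounded in the retrieved documents.',  # placeholder
      )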


   .. py:attribute:: document_retrievers
      :type:  List[str]
      :value: None



   .. py:attribute:: num_completion_tokens
      :type:  int
      :value: None



   .. py:attribute:: temperature
      :type:  float
      :value: None



   .. py:attribute:: retrieval_columns
      :type:  list
      :value: None



   .. py:attribute:: filter_columns
      :type:  list
      :value: None



   .. py:attribute:: include_general_knowledge
      :type:  bool
      :value: None



   .. py:attribute:: enable_web_search
      :type:  bool
      :value: None



   .. py:attribute:: behavior_instructions
      :type:  str
      :value: None



   .. py:attribute:: response_instructions
      :type:  str
      :value: None



   .. py:attribute:: enable_llm_rewrite
      :type:  bool
      :value: None



   .. py:attribute:: column_filtering_instructions
      :type:  str
      :value: None



   .. py:attribute:: keyword_requirement_instructions
      :type:  str
      :value: None



   .. py:attribute:: query_rewrite_instructions
      :type:  str
      :value: None



   .. py:attribute:: max_search_results
      :type:  int
      :value: None



   .. py:attribute:: data_feature_group_ids
      :type:  List[str]
      :value: None



   .. py:attribute:: data_prompt_context
      :type:  str
      :value: None



   .. py:attribute:: data_prompt_table_context
      :type:  Dict[str, str]
      :value: None



   .. py:attribute:: data_prompt_column_context
      :type:  Dict[str, str]
      :value: None



   .. py:attribute:: hide_sql_and_code
      :type:  bool
      :value: None



   .. py:attribute:: disable_data_summarization
      :type:  bool
      :value: None



   .. py:attribute:: data_columns_to_ignore
      :type:  List[str]
      :value: None



   .. py:attribute:: search_score_cutoff
      :type:  float
      :value: None



   .. py:attribute:: include_bm25_retrieval
      :type:  bool
      :value: None



   .. py:attribute:: database_connector_id
      :type:  str
      :value: None



   .. py:attribute:: database_connector_tables
      :type:  List[str]
      :value: None



   .. py:attribute:: enable_code_execution
      :type:  bool
      :value: None



   .. py:attribute:: metadata_columns
      :type:  list
      :value: None



   .. py:attribute:: lookup_rewrite_instructions
      :type:  str
      :value: None



   .. py:attribute:: enable_response_caching
      :type:  bool
      :value: None



   .. py:attribute:: unknown_answer_phrase
      :type:  str
      :value: None



   .. py:attribute:: enable_tool_bar
      :type:  bool
      :value: None



   .. py:attribute:: enable_inline_source_citations
      :type:  bool
      :value: None



   .. py:attribute:: response_format
      :type:  str
      :value: None



   .. py:attribute:: json_response_instructions
      :type:  str
      :value: None



   .. py:attribute:: json_response_schema
      :type:  str
      :value: None



   .. py:attribute:: mask_pii
      :type:  bool
      :value: None



   .. py:attribute:: custom_tools
      :type:  List[str]
      :value: None



   .. py:method:: __post_init__()


.. py:class:: SentenceBoundaryDetectionTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the SENTENCE_BOUNDARY_DETECTION problem type

   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int
   :param dropout_rate: Dropout rate for neural network.
   :type dropout_rate: float
   :param batch_size: Batch size for neural network.
   :type batch_size: BatchSize


   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: dropout_rate
      :type:  float
      :value: None



   .. py:attribute:: batch_size
      :type:  abacusai.api_class.enums.BatchSize
      :value: None



   .. py:method:: __post_init__()


.. py:class:: SentimentDetectionTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the SENTIMENT_DETECTION problem type

   :param sentiment_type: Type of sentiment to detect.
   :type sentiment_type: SentimentType
   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int


   .. py:attribute:: sentiment_type
      :type:  abacusai.api_class.enums.SentimentType
      :value: None



   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: DocumentClassificationTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the DOCUMENT_CLASSIFICATION problem type

   :param zero_shot_hypotheses: Zero shot hypotheses. Example text: 'This text is about pricing'.
   :type zero_shot_hypotheses: List[str]
   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int
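
   Below is a minimal construction sketch; the hypothesis strings are illustrative placeholders following the example phrasing above.

   .. code-block:: python

      from abacusai import DocumentClassificationTrainingConfig

      config = DocumentClassificationTrainingConfig(
          zero_shot_hypotheses=[
              'This text is about pricing',   # hypotheses phrased as statements about the document
              'This text is about support',
          ],
          test_split=10,
      )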


   .. py:attribute:: zero_shot_hypotheses
      :type:  List[str]
      :value: None



   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: DocumentSummarizationTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the DOCUMENT_SUMMARIZATION problem type

   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int
   :param dropout_rate: Dropout rate for neural network.
   :type dropout_rate: float
   :param batch_size: Batch size for neural network.
   :type batch_size: BatchSize


   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: dropout_rate
      :type:  float
      :value: None



   .. py:attribute:: batch_size
      :type:  abacusai.api_class.enums.BatchSize
      :value: None



   .. py:method:: __post_init__()


.. py:class:: DocumentVisualizationTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the DOCUMENT_VISUALIZATION problem type

   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int
   :param dropout_rate: Dropout rate for neural network.
   :type dropout_rate: float
   :param batch_size: Batch size for neural network.
   :type batch_size: BatchSize


   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: dropout_rate
      :type:  float
      :value: None



   .. py:attribute:: batch_size
      :type:  abacusai.api_class.enums.BatchSize
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ClusteringTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the CLUSTERING problem type

   :param num_clusters_selection: Number of clusters. If None, will be selected automatically.
   :type num_clusters_selection: int


   .. py:attribute:: num_clusters_selection
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ClusteringTimeseriesTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the CLUSTERING_TIMESERIES problem type

   :param num_clusters_selection: Number of clusters. If None, will be selected automatically.
   :type num_clusters_selection: int
   :param imputation: Imputation method for missing values.
   :type imputation: ClusteringImputationMethod


   .. py:attribute:: num_clusters_selection
      :type:  int
      :value: None



   .. py:attribute:: imputation
      :type:  abacusai.api_class.enums.ClusteringImputationMethod
      :value: None



   .. py:method:: __post_init__()


.. py:class:: EventAnomalyTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the EVENT_ANOMALY problem type

   :param anomaly_fraction: The fraction of the dataset to classify as anomalous, between 0 and 0.5
   :type anomaly_fraction: float
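
   Below is a minimal construction sketch; the fraction shown is an illustrative placeholder within the allowed 0-0.5 range.

   .. code-block:: python

      from abacusai import EventAnomalyTrainingConfig

      # Flag roughly the top 5% most unusual events as anomalous.
      config = EventAnomalyTrainingConfig(anomaly_fraction=0.05)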


   .. py:attribute:: anomaly_fraction
      :type:  float
      :value: None



   .. py:method:: __post_init__()


.. py:class:: TimeseriesAnomalyTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the TS_ANOMALY problem type

   :param type_of_split: Type of data splitting into train/test.
   :type type_of_split: TimeseriesAnomalyDataSplitType
   :param test_start: Limit training data to dates before the given test start.
   :type test_start: str
   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int
   :param fill_missing_values: Strategies to fill missing values and missing timestamps.
   :type fill_missing_values: List[List[dict]]
   :param handle_zeros_as_missing_values: If True, handle zero values in numeric columns as missing data.
   :type handle_zeros_as_missing_values: bool
   :param timeseries_frequency: Set this to control the frequency of filling missing values.
   :type timeseries_frequency: str
   :param min_samples_in_normal_region: Adjust this to fine-tune the number of anomalies to be identified.
   :type min_samples_in_normal_region: int
   :param anomaly_type: Select what kind of peaks to detect as anomalies.
   :type anomaly_type: TimeseriesAnomalyTypeOfAnomaly
   :param hyperparameter_calculation_with_heuristics: Enable heuristic calculation to get hyperparameters for the model.
   :type hyperparameter_calculation_with_heuristics: TimeseriesAnomalyUseHeuristic
   :param threshold_score: Threshold score for anomaly detection.
   :type threshold_score: float
   :param additional_anomaly_ids: List of categorical columns that can act as multi-identifiers.
   :type additional_anomaly_ids: List[str]
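
   Below is a minimal construction sketch; the date, counts and threshold are illustrative placeholders, not recommendations.

   .. code-block:: python

      from abacusai import TimeseriesAnomalyTrainingConfig

      config = TimeseriesAnomalyTrainingConfig(
          test_start='2024-01-01',            # placeholder train/test boundary date
          handle_zeros_as_missing_values=True,
          min_samples_in_normal_region=100,   # placeholder tuning value
          threshold_score=0.9,                # placeholder anomaly threshold
      )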


   .. py:attribute:: type_of_split
      :type:  abacusai.api_class.enums.TimeseriesAnomalyDataSplitType
      :value: None



   .. py:attribute:: test_start
      :type:  str
      :value: None



   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: fill_missing_values
      :type:  List[List[dict]]
      :value: None



   .. py:attribute:: handle_zeros_as_missing_values
      :type:  bool
      :value: None



   .. py:attribute:: timeseries_frequency
      :type:  str
      :value: None



   .. py:attribute:: min_samples_in_normal_region
      :type:  int
      :value: None



   .. py:attribute:: anomaly_type
      :type:  abacusai.api_class.enums.TimeseriesAnomalyTypeOfAnomaly
      :value: None



   .. py:attribute:: hyperparameter_calculation_with_heuristics
      :type:  abacusai.api_class.enums.TimeseriesAnomalyUseHeuristic
      :value: None



   .. py:attribute:: threshold_score
      :type:  float
      :value: None



   .. py:attribute:: additional_anomaly_ids
      :type:  List[str]
      :value: None



   .. py:method:: __post_init__()


.. py:class:: CumulativeForecastingTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the CUMULATIVE_FORECASTING problem type

   :param test_split: Percent of dataset to use for test data. We support using between 5 (i.e. 5%) and 20 (i.e. 20%) of your dataset.
   :type test_split: int
   :param historical_frequency: Forecast frequency
   :type historical_frequency: str
   :param cumulative_prediction_lengths: List of Cumulative Prediction Frequencies. Each prediction length must be between 1 and 365.
   :type cumulative_prediction_lengths: List[int]
   :param skip_input_transform: Avoid doing numeric scaling transformations on the input.
   :type skip_input_transform: bool
   :param skip_target_transform: Avoid doing numeric scaling transformations on the target.
   :type skip_target_transform: bool
   :param predict_residuals: Predict residuals instead of totals at each prediction step.
   :type predict_residuals: bool
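
   Below is a minimal construction sketch; the frequency string and prediction lengths are illustrative placeholders.

   .. code-block:: python

      from abacusai import CumulativeForecastingTrainingConfig

      config = CumulativeForecastingTrainingConfig(
          test_split=10,
          historical_frequency='1D',                  # placeholder frequency string
          cumulative_prediction_lengths=[7, 30, 90],  # each length must be between 1 and 365
          predict_residuals=False,
      )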


   .. py:attribute:: test_split
      :type:  int
      :value: None



   .. py:attribute:: historical_frequency
      :type:  str
      :value: None



   .. py:attribute:: cumulative_prediction_lengths
      :type:  List[int]
      :value: None



   .. py:attribute:: skip_input_transform
      :type:  bool
      :value: None



   .. py:attribute:: skip_target_transform
      :type:  bool
      :value: None



   .. py:attribute:: predict_residuals
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: ThemeAnalysisTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the THEME ANALYSIS problem type


   .. py:method:: __post_init__()


.. py:class:: AIAgentTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the AI_AGENT problem type

   :param description: Description of the agent function.
   :type description: str
   :param agent_interface: The interface that the agent will be deployed with.
   :type agent_interface: AgentInterface
   :param agent_connectors: The connectors needed for the agent to function.
   :type agent_connectors: List[enums.ApplicationConnectorType]
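
   For illustration, a hedged sketch of this config. The enum member names (``AgentInterface.DEFAULT``, ``ApplicationConnectorType.GOOGLEDRIVE``) are assumptions and should be verified against :py:mod:`abacusai.api_class.enums`.

   .. code-block:: python

      from abacusai import AIAgentTrainingConfig
      from abacusai.api_class import enums

      config = AIAgentTrainingConfig(
          description='Agent that answers questions about internal documents.',
          agent_interface=enums.AgentInterface.DEFAULT,                   # assumed member name
          agent_connectors=[enums.ApplicationConnectorType.GOOGLEDRIVE],  # assumed member name
      )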


   .. py:attribute:: description
      :type:  str
      :value: None



   .. py:attribute:: agent_interface
      :type:  abacusai.api_class.enums.AgentInterface
      :value: None



   .. py:attribute:: agent_connectors
      :type:  List[abacusai.api_class.enums.ApplicationConnectorType]
      :value: None



   .. py:attribute:: enable_binary_input
      :type:  bool
      :value: None



   .. py:attribute:: agent_input_schema
      :type:  dict
      :value: None



   .. py:attribute:: agent_output_schema
      :type:  dict
      :value: None



   .. py:method:: __post_init__()


.. py:class:: CustomTrainedModelTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the CUSTOM_TRAINED_MODEL problem type

   :param max_catalog_size: Maximum expected catalog size.
   :type max_catalog_size: int
   :param max_dimension: Maximum expected dimension of the catalog.
   :type max_dimension: int
   :param index_output_path: Fully qualified cloud location (GCS, S3, etc) to export snapshots of the embedding to.
   :type index_output_path: str
   :param docker_image_uri: Docker image URI.
   :type docker_image_uri: str
   :param service_port: Service port.
   :type service_port: int
   :param streaming_embeddings: Flag to enable streaming embeddings.
   :type streaming_embeddings: bool
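
   A minimal sketch of this config with placeholder values (the path and image URI below are illustrative only):

   .. code-block:: python

      from abacusai import CustomTrainedModelTrainingConfig

      config = CustomTrainedModelTrainingConfig(
          max_catalog_size=100_000,
          max_dimension=256,
          index_output_path='s3://my-bucket/embedding-snapshots/',  # placeholder location
          docker_image_uri='registry.example.com/my-model:latest',  # placeholder image
          service_port=8080,
          streaming_embeddings=False,
      )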


   .. py:attribute:: max_catalog_size
      :type:  int
      :value: None



   .. py:attribute:: max_dimension
      :type:  int
      :value: None



   .. py:attribute:: index_output_path
      :type:  str
      :value: None



   .. py:attribute:: docker_image_uri
      :type:  str
      :value: None



   .. py:attribute:: service_port
      :type:  int
      :value: None



   .. py:attribute:: streaming_embeddings
      :type:  bool
      :value: None



   .. py:method:: __post_init__()


.. py:class:: CustomAlgorithmTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the CUSTOM_ALGORITHM problem type

   :param timeout_minutes: Timeout for the model training in minutes.
   :type timeout_minutes: int


   .. py:attribute:: timeout_minutes
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: OptimizationTrainingConfig

   Bases: :py:obj:`TrainingConfig`


   Training config for the OPTIMIZATION problem type

   :param solve_time_limit: The maximum time in seconds to spend solving the problem. Accepts values between 0 and 86400.
   :type solve_time_limit: float
   :param optimality_gap_limit: The stopping optimality gap limit. Optimality gap is fractional difference between the best known solution and the best possible solution. Accepts values between 0 and 1.
   :type optimality_gap_limit: float
   :param include_all_partitions: Include all partitions in the model training. Default is False.
   :type include_all_partitions: bool
   :param include_specific_partitions: Include specific partitions in partitioned model training. Default is empty list.
   :type include_specific_partitions: List[str]
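
   A minimal sketch of this config (placeholder values; the partition names are illustrative):

   .. code-block:: python

      from abacusai import OptimizationTrainingConfig

      config = OptimizationTrainingConfig(
          solve_time_limit=600.0,        # stop solving after 10 minutes
          optimality_gap_limit=0.01,     # or once within 1% of the best possible solution
          include_all_partitions=False,
          include_specific_partitions=['region_us', 'region_eu'],  # placeholder partition names
      )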


   .. py:attribute:: solve_time_limit
      :type:  float
      :value: None



   .. py:attribute:: optimality_gap_limit
      :type:  float
      :value: None



   .. py:attribute:: include_all_partitions
      :type:  bool
      :value: None



   .. py:attribute:: include_specific_partitions
      :type:  List[str]
      :value: None



   .. py:method:: __post_init__()


.. py:class:: _TrainingConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'problem_type'



   .. py:attribute:: config_class_map


.. py:class:: DeployableAlgorithm

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Algorithm that can be deployed to a model.

   :param algorithm: ID of the algorithm.
   :type algorithm: str
   :param name: Name of the algorithm.
   :type name: str
   :param only_offline_deployable: Whether the algorithm can only be deployed offline.
   :type only_offline_deployable: bool
   :param trained_model_types: List of trained model types.
   :type trained_model_types: List[dict]


   .. py:attribute:: algorithm
      :type:  str
      :value: None



   .. py:attribute:: name
      :type:  str
      :value: None



   .. py:attribute:: only_offline_deployable
      :type:  bool
      :value: None



   .. py:attribute:: trained_model_types
      :type:  List[dict]
      :value: None



.. py:class:: TimeWindowConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Time Window Configuration

   :param window_duration: The duration of the window.
   :type window_duration: int
   :param window_from_start: Whether the window should be from the start of the time series.
   :type window_from_start: bool


   .. py:attribute:: window_duration
      :type:  int
      :value: None



   .. py:attribute:: window_from_start
      :type:  bool
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: ForecastingMonitorConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Forecasting Monitor Configuration

   :param id_column: The name of the column that contains the unique identifier for the time series.
   :type id_column: str
   :param timestamp_column: The name of the column that contains the timestamp for the time series.
   :type timestamp_column: str
   :param target_column: The name of the column that contains the target value for the time series.
   :type target_column: str
   :param start_time: The start time of the time series data.
   :type start_time: str
   :param end_time: The end time of the time series data.
   :type end_time: str
   :param window_config: The windowing configuration for the time series data.
   :type window_config: TimeWindowConfig
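
   A minimal sketch combining this config with :py:class:`TimeWindowConfig` (column names and times are placeholders; treating ``window_duration`` as seconds is an assumption):

   .. code-block:: python

      from abacusai import TimeWindowConfig, ForecastingMonitorConfig

      monitor_config = ForecastingMonitorConfig(
          id_column='store_id',
          timestamp_column='date',
          target_column='sales',
          start_time='2023-01-01',
          end_time='2023-12-31',
          window_config=TimeWindowConfig(
              window_duration=28 * 24 * 3600,   # assumed to be seconds; verify the expected unit
              window_from_start=False,
          ),
      )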


   .. py:attribute:: id_column
      :type:  str
      :value: None



   .. py:attribute:: timestamp_column
      :type:  str
      :value: None



   .. py:attribute:: target_column
      :type:  str
      :value: None



   .. py:attribute:: start_time
      :type:  str
      :value: None



   .. py:attribute:: end_time
      :type:  str
      :value: None



   .. py:attribute:: window_config
      :type:  TimeWindowConfig
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: StdDevThreshold

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Std Dev Threshold types

   :param threshold_type: Type of threshold to apply to the item attributes.
   :type threshold_type: StdDevThresholdType
   :param value: Value to use for the threshold.
   :type value: float


   .. py:attribute:: threshold_type
      :type:  abacusai.api_class.enums.StdDevThresholdType
      :value: None



   .. py:attribute:: value
      :type:  float
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: ItemAttributesStdDevThreshold

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Item Attributes Std Dev Threshold for Monitor Alerts

   :param lower_bound: Lower bound for the item attributes.
   :type lower_bound: StdDevThreshold
   :param upper_bound: Upper bound for the item attributes.
   :type upper_bound: StdDevThreshold
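
   For illustration, a hedged sketch of setting both bounds. The ``StdDevThresholdType`` member name used below is an assumption; verify it against :py:mod:`abacusai.api_class.enums`.

   .. code-block:: python

      from abacusai import StdDevThreshold, ItemAttributesStdDevThreshold
      from abacusai.api_class import enums

      bounds = ItemAttributesStdDevThreshold(
          lower_bound=StdDevThreshold(threshold_type=enums.StdDevThresholdType.ABSOLUTE, value=10.0),    # assumed member
          upper_bound=StdDevThreshold(threshold_type=enums.StdDevThresholdType.ABSOLUTE, value=1000.0),  # assumed member
      )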


   .. py:attribute:: lower_bound
      :type:  StdDevThreshold
      :value: None



   .. py:attribute:: upper_bound
      :type:  StdDevThreshold
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: RestrictFeatureMappings

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Restrict Feature Mappings for Monitor Filtering

   :param feature_name: The name of the feature to restrict the monitor to.
   :type feature_name: str
   :param restricted_feature_values: The values of the feature to restrict the monitor to if feature is a categorical.
   :type restricted_feature_values: list
   :param start_time: The start time of the timestamp feature to filter from
   :type start_time: str
   :param end_time: The end time of the timestamp feature to filter until
   :type end_time: str
   :param min_value: Only include rows where the numerical feature is above this value
   :type min_value: float
   :param max_value: Only include rows where the numerical feature is below this value
   :type max_value: float


   .. py:attribute:: feature_name
      :type:  str
      :value: None



   .. py:attribute:: restricted_feature_values
      :type:  list
      :value: []



   .. py:attribute:: start_time
      :type:  str
      :value: None



   .. py:attribute:: end_time
      :type:  str
      :value: None



   .. py:attribute:: min_value
      :type:  float
      :value: None



   .. py:attribute:: max_value
      :type:  float
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: MonitorFilteringConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Monitor Filtering Configuration

   :param start_time: The start time of the prediction time column
   :type start_time: str
   :param end_time: The end time of the prediction time column
   :type end_time: str
   :param restrict_feature_mappings: The feature mappings to restrict the monitor to.
   :type restrict_feature_mappings: List[RestrictFeatureMappings]
   :param target_class: The target class to restrict the monitor to.
   :type target_class: str
   :param train_target_feature: Set the target feature for the training data.
   :type train_target_feature: str
   :param prediction_target_feature: Set the target feature for the prediction data.
   :type prediction_target_feature: str
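
   A minimal sketch that restricts a monitor to a categorical feature value and a time range (feature names, values, and times are placeholders):

   .. code-block:: python

      from abacusai import RestrictFeatureMappings, MonitorFilteringConfig

      filtering = MonitorFilteringConfig(
          start_time='2024-01-01 00:00:00',
          end_time='2024-06-30 23:59:59',
          restrict_feature_mappings=[
              RestrictFeatureMappings(
                  feature_name='region',
                  restricted_feature_values=['US', 'EU'],
              ),
          ],
          target_class='churned',   # placeholder class label
      )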


   .. py:attribute:: start_time
      :type:  str
      :value: None



   .. py:attribute:: end_time
      :type:  str
      :value: None



   .. py:attribute:: restrict_feature_mappings
      :type:  List[RestrictFeatureMappings]
      :value: None



   .. py:attribute:: target_class
      :type:  str
      :value: None



   .. py:attribute:: train_target_feature
      :type:  str
      :value: None



   .. py:attribute:: prediction_target_feature
      :type:  str
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: AlertConditionConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for alert condition configs


   .. py:attribute:: alert_type
      :type:  abacusai.api_class.enums.MonitorAlertType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: AccuracyBelowThresholdConditionConfig

   Bases: :py:obj:`AlertConditionConfig`


   Accuracy Below Threshold Condition Config for Monitor Alerts

   :param threshold: Threshold for when to consider a column to be in violation. The alert will only fire when the drift value is strictly greater than the threshold.
   :type threshold: float


   .. py:attribute:: threshold
      :type:  float
      :value: None



   .. py:method:: __post_init__()


.. py:class:: FeatureDriftConditionConfig

   Bases: :py:obj:`AlertConditionConfig`


   Feature Drift Condition Config for Monitor Alerts

   :param feature_drift_type: Feature drift type to apply the threshold on to determine whether a column has drifted significantly enough to be a violation.
   :type feature_drift_type: FeatureDriftType
   :param threshold: Threshold for when to consider a column to be in violation. The alert will only fire when the drift value is strictly greater than the threshold.
   :type threshold: float
   :param minimum_violations: Number of columns that must exceed the specified threshold to trigger an alert.
   :type minimum_violations: int
   :param feature_names: List of feature names to monitor for this alert.
   :type feature_names: List[str]
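
   A hedged sketch of an alert condition that fires when at least two of the listed features drift past the threshold. The ``FeatureDriftType`` member name is an assumption; verify it against :py:mod:`abacusai.api_class.enums`.

   .. code-block:: python

      from abacusai import FeatureDriftConditionConfig
      from abacusai.api_class import enums

      condition = FeatureDriftConditionConfig(
          feature_drift_type=enums.FeatureDriftType.KL,   # assumed member name
          threshold=0.1,
          minimum_violations=2,
          feature_names=['age', 'income', 'tenure'],      # placeholder feature names
      )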


   .. py:attribute:: feature_drift_type
      :type:  abacusai.api_class.enums.FeatureDriftType
      :value: None



   .. py:attribute:: threshold
      :type:  float
      :value: None



   .. py:attribute:: minimum_violations
      :type:  int
      :value: None



   .. py:attribute:: feature_names
      :type:  List[str]
      :value: None



   .. py:method:: __post_init__()


.. py:class:: TargetDriftConditionConfig

   Bases: :py:obj:`AlertConditionConfig`


   Target Drift Condition Config for Monitor Alerts

   :param feature_drift_type: Target drift type to apply the threshold on to determine whether a column has drifted significantly enough to be a violation.
   :type feature_drift_type: FeatureDriftType
   :param threshold: Threshold for when to consider the target column to be in violation. The alert will only fire when the drift value is strictly greater than the threshold.
   :type threshold: float


   .. py:attribute:: feature_drift_type
      :type:  abacusai.api_class.enums.FeatureDriftType
      :value: None



   .. py:attribute:: threshold
      :type:  float
      :value: None



   .. py:method:: __post_init__()


.. py:class:: HistoryLengthDriftConditionConfig

   Bases: :py:obj:`AlertConditionConfig`


   History Length Drift Condition Config for Monitor Alerts

   :param feature_drift_type: History length drift type to apply the threshold on to determine whether the history length has drifted significantly enough to be a violation.
   :type feature_drift_type: FeatureDriftType
   :param threshold: Threshold for when to consider the history length to be in violation. The alert will only fire when the drift value is strictly greater than the threshold.
   :type threshold: float


   .. py:attribute:: feature_drift_type
      :type:  abacusai.api_class.enums.FeatureDriftType
      :value: None



   .. py:attribute:: threshold
      :type:  float
      :value: None



   .. py:method:: __post_init__()


.. py:class:: DataIntegrityViolationConditionConfig

   Bases: :py:obj:`AlertConditionConfig`


   Data Integrity Violation Condition Config for Monitor Alerts

   :param data_integrity_type: This option selects the data integrity violations to monitor for this alert.
   :type data_integrity_type: DataIntegrityViolationType
   :param minimum_violations: Number of columns that must exceed the specified threshold to trigger an alert.
   :type minimum_violations: int


   .. py:attribute:: data_integrity_type
      :type:  abacusai.api_class.enums.DataIntegrityViolationType
      :value: None



   .. py:attribute:: minimum_violations
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: BiasViolationConditionConfig

   Bases: :py:obj:`AlertConditionConfig`


   Bias Violation Condition Config for Monitor Alerts

   :param bias_type: This option selects the bias metric to monitor for this alert.
   :type bias_type: BiasType
   :param threshold: Threshold for when to consider a column to be in violation. The alert will only fire when the drift value is strictly greater than the threshold.
   :type threshold: float
   :param minimum_violations: Number of columns that must exceed the specified threshold to trigger an alert.
   :type minimum_violations: int


   .. py:attribute:: bias_type
      :type:  abacusai.api_class.enums.BiasType
      :value: None



   .. py:attribute:: threshold
      :type:  float
      :value: None



   .. py:attribute:: minimum_violations
      :type:  int
      :value: None



   .. py:method:: __post_init__()


.. py:class:: PredictionCountConditionConfig

   Bases: :py:obj:`AlertConditionConfig`


   Deployment Prediction Condition Config for Deployment Alerts. By default, this monitors whether the number of predictions made over a time window has dropped significantly.

   :param threshold: Threshold for when to consider it a violation. Negative means alert on reduction, positive means alert on increase.
   :type threshold: float
   :param aggregation_window: Time window to aggregate the predictions over, e.g. 1h, 10m. Only h(hour), m(minute) and s(second) are supported.
   :type aggregation_window: str
   :param aggregation_type: Aggregation type to use for the aggregation window, e.g. sum, avg.
   :type aggregation_type: str
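
   A minimal sketch of a condition that alerts when prediction volume drops (values are placeholders):

   .. code-block:: python

      from abacusai import PredictionCountConditionConfig

      condition = PredictionCountConditionConfig(
          threshold=-0.5,             # negative threshold: alert on a reduction in predictions
          aggregation_window='1h',    # aggregate predictions per hour
          aggregation_type='sum',
      )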


   .. py:attribute:: threshold
      :type:  float
      :value: None



   .. py:attribute:: aggregation_window
      :type:  str
      :value: None



   .. py:attribute:: aggregation_type
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: _AlertConditionConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'alert_type'



   .. py:attribute:: config_class_key_value_camel_case
      :value: True



   .. py:attribute:: config_class_map


.. py:class:: AlertActionConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for alert action configs


   .. py:attribute:: action_type
      :type:  abacusai.api_class.enums.AlertActionType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: EmailActionConfig

   Bases: :py:obj:`AlertActionConfig`


   Email Action Config for Monitor Alerts

   :param email_recipients: List of email addresses to send the alert to.
   :type email_recipients: List[str]
   :param email_body: Body of the email to send.
   :type email_body: str
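
   A minimal sketch of an email alert action (the recipient address is a placeholder):

   .. code-block:: python

      from abacusai import EmailActionConfig

      action = EmailActionConfig(
          email_recipients=['mlops-team@example.com'],
          email_body='A monitored drift metric exceeded its configured threshold.',
      )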


   .. py:attribute:: email_recipients
      :type:  List[str]
      :value: None



   .. py:attribute:: email_body
      :type:  str
      :value: None



   .. py:method:: __post_init__()


.. py:class:: _AlertActionConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'action_type'



   .. py:attribute:: config_class_map


.. py:class:: MonitorThresholdConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Monitor Threshold Config for Monitor Alerts

   :param drift_type: Feature drift type to apply the threshold on to determine whether a column has drifted significantly enough to be a violation.
   :type drift_type: FeatureDriftType
   :param threshold_config: Thresholds for when to consider a column to be in violation. The alert will only fire when the drift value is strictly greater than the threshold.
   :type threshold_config: ThresholdConfigs


   .. py:attribute:: drift_type
      :type:  abacusai.api_class.enums.FeatureDriftType
      :value: None



   .. py:attribute:: at_risk_threshold
      :type:  float
      :value: None



   .. py:attribute:: severely_drifting_threshold
      :type:  float
      :value: None



   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: FeatureMappingConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Feature mapping configuration for a feature group type.

   :param feature_name: The name of the feature in the feature group.
   :type feature_name: str
   :param feature_mapping: The desired feature mapping for the feature.
   :type feature_mapping: str
   :param nested_feature_name: The name of the nested feature in the feature group.
   :type nested_feature_name: str


   .. py:attribute:: feature_name
      :type:  str


   .. py:attribute:: feature_mapping
      :type:  str
      :value: None



   .. py:attribute:: nested_feature_name
      :type:  str
      :value: None



.. py:class:: ProjectFeatureGroupTypeMappingsConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Project feature group type mappings.

   :param feature_group_id: The unique identifier for the feature group.
   :type feature_group_id: str
   :param feature_group_type: The feature group type.
   :type feature_group_type: str
   :param feature_mappings: The feature mappings for the feature group.
   :type feature_mappings: List[FeatureMappingConfig]
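
   For illustration, a hedged sketch of mapping features for a feature group. The ``feature_group_type`` and ``feature_mapping`` string values below are assumptions chosen for illustration, not a definitive list.

   .. code-block:: python

      from abacusai import FeatureMappingConfig, ProjectFeatureGroupTypeMappingsConfig

      mappings = ProjectFeatureGroupTypeMappingsConfig(
          feature_group_id='fg_placeholder_id',     # placeholder ID
          feature_group_type='CUSTOM_TABLE',        # assumed type name
          feature_mappings=[
              FeatureMappingConfig(feature_name='user_id', feature_mapping='ITEM_ID'),  # assumed mapping name
          ],
      )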


   .. py:attribute:: feature_group_id
      :type:  str


   .. py:attribute:: feature_group_type
      :type:  str
      :value: None



   .. py:attribute:: feature_mappings
      :type:  List[FeatureMappingConfig]


   .. py:method:: from_dict(input_dict)
      :classmethod:



.. py:class:: ConstraintConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   Constraint configuration.

   :param constant: The constant value for the constraint.
   :type constant: float
   :param operator: The operator for the constraint. Could be 'EQ', 'LE', 'GE'
   :type operator: str
   :param enforcement: The enforcement for the constraint. Could be 'HARD' or 'SOFT' or 'SKIP'. Default is 'HARD'
   :type enforcement: str
   :param code: The code for the constraint.
   :type code: str
   :param penalty: The penalty for violating the constraint.
   :type penalty: float


   .. py:attribute:: constant
      :type:  float


   .. py:attribute:: operator
      :type:  str


   .. py:attribute:: enforcement
      :type:  Optional[str]
      :value: None



   .. py:attribute:: code
      :type:  Optional[str]
      :value: None



   .. py:attribute:: penalty
      :type:  Optional[float]
      :value: None



.. py:class:: ProjectFeatureGroupConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for project feature group configuration.


   .. py:attribute:: type
      :type:  abacusai.api_class.enums.ProjectConfigType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: ConstraintProjectFeatureGroupConfig

   Bases: :py:obj:`ProjectFeatureGroupConfig`


   Constraint project feature group configuration.

   :param constraints: The constraint for the feature group. Should be a list of one ConstraintConfig.
   :type constraints: List[ConstraintConfig]
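
   A minimal sketch of a single hard constraint, using the operator and enforcement values documented above:

   .. code-block:: python

      from abacusai import ConstraintConfig, ConstraintProjectFeatureGroupConfig

      constraint = ConstraintConfig(
          constant=100.0,
          operator='LE',          # one of 'EQ', 'LE', 'GE'
          enforcement='HARD',     # 'HARD', 'SOFT', or 'SKIP'
      )
      config = ConstraintProjectFeatureGroupConfig(constraints=[constraint])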


   .. py:attribute:: constraints
      :type:  List[ConstraintConfig]


   .. py:method:: __post_init__()


.. py:class:: ReviewModeProjectFeatureGroupConfig

   Bases: :py:obj:`ProjectFeatureGroupConfig`


   Review mode project feature group configuration.

   :param is_review_mode: The review mode for the feature group.
   :type is_review_mode: bool


   .. py:attribute:: is_review_mode
      :type:  bool


   .. py:method:: __post_init__()


.. py:class:: _ProjectFeatureGroupConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'type'



   .. py:attribute:: config_class_map


.. py:class:: PythonFunctionArgument

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   A config class for python function arguments

   :param variable_type: The type of the python function argument
   :type variable_type: PythonFunctionArgumentType
   :param name: The name of the python function variable
   :type name: str
   :param is_required: Whether the argument is required
   :type is_required: bool
   :param value: The value of the argument
   :type value: Any
   :param pipeline_variable: The name of the pipeline variable to use as the value
   :type pipeline_variable: str
   :param description: The description of the argument
   :type description: str
   :param item_type: Type of items when variable_type is LIST
   :type item_type: str
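
   For illustration, a hedged sketch of declaring one input argument and one output mapping. The enum member names are assumptions; verify them against :py:mod:`abacusai.api_class.enums`.

   .. code-block:: python

      from abacusai import PythonFunctionArgument, OutputVariableMapping
      from abacusai.api_class import enums

      arg = PythonFunctionArgument(
          variable_type=enums.PythonFunctionArgumentType.STRING,   # assumed member name
          name='input_table_name',
          is_required=True,
          description='Name of the table the function should read.',
      )
      output = OutputVariableMapping(
          variable_type=enums.PythonFunctionOutputArgumentType.FEATURE_GROUP_ID,   # assumed member name
          name='result_feature_group',
      )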


   .. py:attribute:: variable_type
      :type:  abacusai.api_class.enums.PythonFunctionArgumentType
      :value: None



   .. py:attribute:: name
      :type:  str
      :value: None



   .. py:attribute:: is_required
      :type:  bool
      :value: True



   .. py:attribute:: value
      :type:  Any
      :value: None



   .. py:attribute:: pipeline_variable
      :type:  str
      :value: None



   .. py:attribute:: description
      :type:  str
      :value: None



   .. py:attribute:: item_type
      :type:  str
      :value: None



.. py:class:: OutputVariableMapping

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   A config class for python function output variable mappings

   :param variable_type: The type of the python function output argument
   :type variable_type: PythonFunctionOutputArgumentType
   :param name: The name of the python function variable
   :type name: str


   .. py:attribute:: variable_type
      :type:  abacusai.api_class.enums.PythonFunctionOutputArgumentType
      :value: None



   .. py:attribute:: name
      :type:  str
      :value: None



.. py:class:: FeatureGroupExportConfig

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   An abstract class for feature group exports.


   .. py:attribute:: connector_type
      :type:  abacusai.api_class.enums.ConnectorType
      :value: None



   .. py:method:: _get_builder()
      :classmethod:



.. py:class:: FileConnectorExportConfig

   Bases: :py:obj:`FeatureGroupExportConfig`


   File connector export config for feature groups

   :param location: The location to export the feature group to
   :type location: str
   :param export_file_format: The file format to export the feature group to
   :type export_file_format: str


   .. py:attribute:: location
      :type:  str
      :value: None



   .. py:attribute:: export_file_format
      :type:  str
      :value: None



   .. py:method:: __post_init__()


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: DatabaseConnectorExportConfig

   Bases: :py:obj:`FeatureGroupExportConfig`


   Database connector export config for feature groups

   :param database_connector_id: The ID of the database connector to export the feature group to
   :type database_connector_id: str
   :param mode: The mode to export the feature group in
   :type mode: str
   :param object_name: The name of the object to export the feature group to
   :type object_name: str
   :param id_column: The name of the ID column
   :type id_column: str
   :param additional_id_columns: Additional ID columns
   :type additional_id_columns: List[str]
   :param data_columns: The data columns to export the feature group to
   :type data_columns: Dict[str, str]
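
   For illustration, hedged sketches of both export configs. The location, connector ID, and ``mode`` value are placeholders or assumptions.

   .. code-block:: python

      from abacusai import FileConnectorExportConfig, DatabaseConnectorExportConfig

      file_export = FileConnectorExportConfig(
          location='s3://my-bucket/exports/',    # placeholder location
          export_file_format='CSV',
      )

      db_export = DatabaseConnectorExportConfig(
          database_connector_id='connector_placeholder_id',   # placeholder ID
          mode='UPSERT',                                       # assumed mode value
          object_name='EXPORTED_TABLE',
          id_column='record_id',
          data_columns={'prediction': 'PREDICTION_COL'},       # source feature -> destination column
      )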


   .. py:attribute:: database_connector_id
      :type:  str
      :value: None



   .. py:attribute:: mode
      :type:  str
      :value: None



   .. py:attribute:: object_name
      :type:  str
      :value: None



   .. py:attribute:: id_column
      :type:  str
      :value: None



   .. py:attribute:: additional_id_columns
      :type:  List[str]
      :value: None



   .. py:attribute:: data_columns
      :type:  Dict[str, str]
      :value: None



   .. py:method:: __post_init__()


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: _FeatureGroupExportConfigFactory

   Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`


   Helper class that provides a standard way to create an ABC using
   inheritance.


   .. py:attribute:: config_abstract_class


   .. py:attribute:: config_class_key
      :value: 'connector_type'



   .. py:attribute:: config_class_map


.. py:class:: ResponseSection

   Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`


   A response section that an agent can return to render specific UI elements.

   :param type: The type of the response.
   :type type: ResponseSectionType
   :param id: The section key of the segment.
   :type id: str


   .. py:attribute:: type
      :type:  abacusai.api_class.enums.ResponseSectionType


   .. py:attribute:: id
      :type:  str


   .. py:method:: __post_init__()


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:data:: Segment

.. py:class:: AgentFlowButtonResponseSection(label, agent_workflow_node_name, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an AI Agent can return to render a button.

   :param label: The label of the button.
   :type label: str
   :param agent_workflow_node_name: The workflow start node to be executed when the button is clicked.
   :type agent_workflow_node_name: str


   .. py:attribute:: label
      :type:  str


   .. py:attribute:: agent_workflow_node_name
      :type:  str


.. py:class:: ImageUrlResponseSection(url, height, width, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render an image.

   :param url: The url of the image to be displayed.
   :type url: str
   :param height: The height of the image.
   :type height: int
   :param width: The width of the image.
   :type width: int


   .. py:attribute:: url
      :type:  str


   .. py:attribute:: height
      :type:  int


   .. py:attribute:: width
      :type:  int


.. py:class:: TextResponseSection(text, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render text.

   :param segment: The text to be displayed.
   :type segment: str


   .. py:attribute:: segment
      :type:  str


.. py:class:: RuntimeSchemaResponseSection(json_schema, ui_schema = None, schema_prop = None)

   Bases: :py:obj:`ResponseSection`


   A segment that an agent can return to render json and ui schema in react-jsonschema-form format for workflow nodes.
   This is primarily used to generate dynamic forms at runtime. If a node returns a runtime schema variable, the UI will render the form upon node execution.

   :param json_schema: json schema in RJSF format.
   :type json_schema: dict
   :param ui_schema: ui schema in RJSF format.
   :type ui_schema: dict


   .. py:attribute:: json_schema
      :type:  dict


   .. py:attribute:: ui_schema
      :type:  dict


.. py:class:: CodeResponseSection(code, language, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render code.

   :param code: The code to be displayed.
   :type code: str
   :param language: The language of the code.
   :type language: CodeLanguage


   .. py:attribute:: code
      :type:  str


   .. py:attribute:: language
      :type:  abacusai.api_class.enums.CodeLanguage


.. py:class:: Base64ImageResponseSection(b64_image, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render a base64 image.

   :param b64_image: The base64 image to be displayed.
   :type b64_image: str


   .. py:attribute:: b64_image
      :type:  str


.. py:class:: CollapseResponseSection(title, content, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render a collapsible component.

   :param title: The title of the collapsible component.
   :type title: str
   :param content: The response section constituting the content of the collapsible component
   :type content: ResponseSection


   .. py:attribute:: title
      :type:  str


   .. py:attribute:: content
      :type:  ResponseSection


   .. py:method:: to_dict()

      Standardizes converting an ApiClass to dictionary.
      Keys of response dictionary are converted to camel case.
      This also validates the fields ( type, value, etc ) received in the dictionary.



.. py:class:: ListResponseSection(items, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render a list.

   :param items: The list items to be displayed.
   :type items: List[str]


   .. py:attribute:: items
      :type:  List[str]


.. py:class:: ChartResponseSection(chart, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render a chart.

   :param chart: The chart to be displayed.
   :type chart: dict


   .. py:attribute:: chart
      :type:  dict


.. py:class:: DataframeResponseSection(df, header = None, section_key = None)

   Bases: :py:obj:`ResponseSection`


   A response section that an agent can return to render a pandas dataframe.

   :param df: The dataframe to be displayed.
   :type df: pandas.DataFrame
   :param header: Heading of the table to be displayed.
   :type header: str
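
   A minimal sketch of building a dataframe section alongside a text section, using the constructors documented in this module; the data is illustrative:

   .. code-block:: python

      import pandas as pd
      from abacusai import TextResponseSection, DataframeResponseSection

      df = pd.DataFrame({'item': ['a', 'b'], 'score': [0.9, 0.7]})
      sections = [
          TextResponseSection('Top scoring items:'),
          DataframeResponseSection(df, header='Scores'),
      ]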


   .. py:attribute:: df
      :type:  Any


   .. py:attribute:: header
      :type:  str


.. py:class:: ApiEndpoint(client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None, hostingDomain=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A collection of endpoints that can be used to make requests, such as API calls or predict calls

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param apiEndpoint: The URI that can be used to make API calls
   :type apiEndpoint: str
   :param predictEndpoint: The URI that can be used to make predict calls against Deployments
   :type predictEndpoint: str
   :param proxyEndpoint: The URI that can be used to make proxy server calls
   :type proxyEndpoint: str
   :param llmEndpoint: The URI that can be used to make LLM API calls
   :type llmEndpoint: str
   :param externalChatEndpoint: The URI that can be used to access the external chat
   :type externalChatEndpoint: str
   :param dashboardEndpoint: The URI that the external chat will use to go back to the dashboard
   :type dashboardEndpoint: str
   :param hostingDomain: The domain for hosted app deployments
   :type hostingDomain: str


   .. py:attribute:: api_endpoint
      :value: None



   .. py:attribute:: predict_endpoint
      :value: None



   .. py:attribute:: proxy_endpoint
      :value: None



   .. py:attribute:: llm_endpoint
      :value: None



   .. py:attribute:: external_chat_endpoint
      :value: None



   .. py:attribute:: dashboard_endpoint
      :value: None



   .. py:attribute:: hosting_domain
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ApiKey(client, apiKeyId=None, apiKey=None, apiKeySuffix=None, tag=None, type=None, createdAt=None, expiresAt=None, isExpired=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An API Key to authenticate requests to the Abacus.AI API

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param apiKeyId: The unique ID for the API key
   :type apiKeyId: str
   :param apiKey: The unique API key scoped to a specific organization. Value will be partially obscured.
   :type apiKey: str
   :param apiKeySuffix: The last 4 characters of the API key.
   :type apiKeySuffix: str
   :param tag: A user-friendly tag for the API key.
   :type tag: str
   :param type: The type of the API key, either 'default', 'code-llm', or 'computer-use'.
   :type type: str
   :param createdAt: The timestamp when the API key was created.
   :type createdAt: str
   :param expiresAt: The timestamp when the API key will expire.
   :type expiresAt: str
   :param isExpired: Whether the API key has expired.
   :type isExpired: bool


   .. py:attribute:: api_key_id
      :value: None



   .. py:attribute:: api_key
      :value: None



   .. py:attribute:: api_key_suffix
      :value: None



   .. py:attribute:: tag
      :value: None



   .. py:attribute:: type
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: expires_at
      :value: None



   .. py:attribute:: is_expired
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: delete()

      Delete a specified API key.

      :param api_key_id: The ID of the API key to delete.
      :type api_key_id: str



.. py:class:: AppUserGroup(client, name=None, userGroupId=None, externalApplicationIds=None, invitedUserEmails=None, publicUserGroup=None, hasExternalApplicationReporting=None, isExternalServiceGroup=None, externalServiceGroupId=None, users={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An app user group. This is used to determine which users have permissions for external chatbots.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the user group.
   :type name: str
   :param userGroupId: The unique identifier of the user group.
   :type userGroupId: str
   :param externalApplicationIds: The ids of the external applications the group has access to.
   :type externalApplicationIds: list[str]
   :param invitedUserEmails: The emails of the users invited to the user group who have not yet accepted the invite.
   :type invitedUserEmails: list[str]
   :param publicUserGroup: Boolean flag whether the app user group is the public user group of the org or not.
   :type publicUserGroup: bool
   :param hasExternalApplicationReporting: Whether users in the App User Group have permission to view all reports in their organization.
   :type hasExternalApplicationReporting: bool
   :param isExternalServiceGroup: Whether the App User Group corresponds to a user group defined in an external service (e.g. Microsoft Active Directory or Okta)
   :type isExternalServiceGroup: bool
   :param externalServiceGroupId: The identifier that corresponds to the app user group's external service group representation
   :type externalServiceGroupId: str
   :param users: The users in the user group.
   :type users: User


   .. py:attribute:: name
      :value: None



   .. py:attribute:: user_group_id
      :value: None



   .. py:attribute:: external_application_ids
      :value: None



   .. py:attribute:: invited_user_emails
      :value: None



   .. py:attribute:: public_user_group
      :value: None



   .. py:attribute:: has_external_application_reporting
      :value: None



   .. py:attribute:: is_external_service_group
      :value: None



   .. py:attribute:: external_service_group_id
      :value: None



   .. py:attribute:: users


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AppUserGroupSignInToken(client, token=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   User Group Sign In Token

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param token: The token to sign in the user
   :type token: str


   .. py:attribute:: token
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ApplicationConnector(client, applicationConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A connector to an external service

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param applicationConnectorId: The unique ID for the connection.
   :type applicationConnectorId: str
   :param service: The service this connection connects to
   :type service: str
   :param name: A user-friendly name for the service
   :type name: str
   :param createdAt: When the connector was created
   :type createdAt: str
   :param status: The status of the Application Connector
   :type status: str
   :param auth: Non-secret connection information for this connector
   :type auth: dict


   .. py:attribute:: application_connector_id
      :value: None



   .. py:attribute:: service
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: auth
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: rename(name)

   Renames an Application Connector.

      :param name: A new name for the application connector.
      :type name: str



   .. py:method:: delete()

      Delete an application connector.

      :param application_connector_id: The unique identifier for the application connector.
      :type application_connector_id: str



   .. py:method:: list_objects()

   Lists queryable objects in the application connector.

      :param application_connector_id: Unique string identifier for the application connector.
      :type application_connector_id: str



   .. py:method:: verify()

      Checks if Abacus.AI can access the application using the provided application connector ID.

      :param application_connector_id: Unique string identifier for the application connector.
      :type application_connector_id: str



.. py:class:: AudioGenSettings(client, model=None, settings=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Audio generation settings

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param model: names of models available for audio generation.
   :type model: dict
   :param settings: settings for each model.
   :type settings: dict


   .. py:attribute:: model
      :value: None



   .. py:attribute:: settings
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: BatchPrediction(client, batchPredictionId=None, createdAt=None, name=None, deploymentId=None, fileConnectorOutputLocation=None, databaseConnectorId=None, databaseOutputConfiguration=None, fileOutputFormat=None, connectorType=None, legacyInputLocation=None, outputFeatureGroupId=None, featureGroupTableName=None, outputFeatureGroupTableName=None, summaryFeatureGroupTableName=None, csvInputPrefix=None, csvPredictionPrefix=None, csvExplanationsPrefix=None, outputIncludesMetadata=None, resultInputColumns=None, modelMonitorId=None, modelVersion=None, bpAcrossVersionsMonitorId=None, algorithm=None, batchPredictionArgsType=None, batchInputs={}, latestBatchPredictionVersion={}, refreshSchedules={}, inputFeatureGroups={}, globalPredictionArgs={}, batchPredictionArgs={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Make batch predictions.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param batchPredictionId: The unique identifier of the batch prediction request.
   :type batchPredictionId: str
   :param createdAt: When the batch prediction was created, in ISO-8601 format.
   :type createdAt: str
   :param name: Name given to the batch prediction object.
   :type name: str
   :param deploymentId: The deployment used to make the predictions.
   :type deploymentId: str
   :param fileConnectorOutputLocation: Contains information about where the batch predictions are written to.
   :type fileConnectorOutputLocation: str
   :param databaseConnectorId: The database connector to write the results to.
   :type databaseConnectorId: str
   :param databaseOutputConfiguration: Contains information about where the batch predictions are written to.
   :type databaseOutputConfiguration: dict
   :param fileOutputFormat: The format of the batch prediction output (CSV or JSON).
   :type fileOutputFormat: str
   :param connectorType: Null if writing to internal console, else FEATURE_GROUP | FILE_CONNECTOR | DATABASE_CONNECTOR.
   :type connectorType: str
   :param legacyInputLocation: The location of the input data.
   :type legacyInputLocation: str
   :param outputFeatureGroupId: The Batch Prediction output feature group ID if applicable
   :type outputFeatureGroupId: str
   :param featureGroupTableName: The table name of the Batch Prediction output feature group.
   :type featureGroupTableName: str
   :param outputFeatureGroupTableName: The table name of the Batch Prediction output feature group.
   :type outputFeatureGroupTableName: str
   :param summaryFeatureGroupTableName: The table name of the metrics summary feature group output by Batch Prediction.
   :type summaryFeatureGroupTableName: str
   :param csvInputPrefix: A prefix to prepend to the input columns, only applies when output format is CSV.
   :type csvInputPrefix: str
   :param csvPredictionPrefix: A prefix to prepend to the prediction columns, only applies when output format is CSV.
   :type csvPredictionPrefix: str
   :param csvExplanationsPrefix: A prefix to prepend to the explanation columns, only applies when output format is CSV.
   :type csvExplanationsPrefix: str
   :param outputIncludesMetadata: If true, output will contain columns including prediction start time, batch prediction version, and model version.
   :type outputIncludesMetadata: bool
   :param resultInputColumns: If present, will limit result files or feature groups to only include columns present in this list.
   :type resultInputColumns: list
   :param modelMonitorId: The model monitor for this batch prediction.
   :type modelMonitorId: str
   :param modelVersion: The model instance used in the deployment for the batch prediction.
   :type modelVersion: str
   :param bpAcrossVersionsMonitorId: The model monitor for this batch prediction across versions.
   :type bpAcrossVersionsMonitorId: str
   :param algorithm: The algorithm that is currently deployed.
   :type algorithm: str
   :param batchPredictionArgsType: The type of batch prediction arguments used for this batch prediction.
   :type batchPredictionArgsType: str
   :param batchInputs: Inputs to the batch prediction.
   :type batchInputs: PredictionInput
   :param latestBatchPredictionVersion: The latest batch prediction version.
   :type latestBatchPredictionVersion: BatchPredictionVersion
   :param refreshSchedules: List of refresh schedules that dictate the next time the batch prediction will be run.
   :type refreshSchedules: RefreshSchedule
   :param inputFeatureGroups: List of prediction feature groups.
   :type inputFeatureGroups: PredictionFeatureGroup
   :param globalPredictionArgs:
   :type globalPredictionArgs: BatchPredictionArgs
   :param batchPredictionArgs: Argument(s) passed to every prediction call.
   :type batchPredictionArgs: BatchPredictionArgs
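
   For illustration, a hedged end-to-end sketch using the methods documented below. The API key and batch prediction ID are placeholders, and obtaining this object via ``ApiClient.describe_batch_prediction`` is an assumption about the typical workflow.

   .. code-block:: python

      from abacusai import ApiClient

      client = ApiClient(api_key='YOUR_API_KEY')                    # placeholder key
      bp = client.describe_batch_prediction('BATCH_PREDICTION_ID')  # placeholder ID

      bp.start()                    # kick off a new batch prediction version
      bp.wait_for_predictions()     # block until the predictions are ready
      results_df = bp.load_results_as_pandas()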


   .. py:attribute:: batch_prediction_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: file_connector_output_location
      :value: None



   .. py:attribute:: database_connector_id
      :value: None



   .. py:attribute:: database_output_configuration
      :value: None



   .. py:attribute:: file_output_format
      :value: None



   .. py:attribute:: connector_type
      :value: None



   .. py:attribute:: legacy_input_location
      :value: None



   .. py:attribute:: output_feature_group_id
      :value: None



   .. py:attribute:: feature_group_table_name
      :value: None



   .. py:attribute:: output_feature_group_table_name
      :value: None



   .. py:attribute:: summary_feature_group_table_name
      :value: None



   .. py:attribute:: csv_input_prefix
      :value: None



   .. py:attribute:: csv_prediction_prefix
      :value: None



   .. py:attribute:: csv_explanations_prefix
      :value: None



   .. py:attribute:: output_includes_metadata
      :value: None



   .. py:attribute:: result_input_columns
      :value: None



   .. py:attribute:: model_monitor_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: bp_across_versions_monitor_id
      :value: None



   .. py:attribute:: algorithm
      :value: None



   .. py:attribute:: batch_prediction_args_type
      :value: None



   .. py:attribute:: batch_inputs


   .. py:attribute:: latest_batch_prediction_version


   .. py:attribute:: refresh_schedules


   .. py:attribute:: input_feature_groups


   .. py:attribute:: global_prediction_args


   .. py:attribute:: batch_prediction_args


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: start()

      Creates a new batch prediction version job for a given batch prediction job description.

      :param batch_prediction_id: The unique identifier of the batch prediction to create a new version of.
      :type batch_prediction_id: str

      :returns: The batch prediction version started by this method call.
      :rtype: BatchPredictionVersion



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: BatchPrediction



   .. py:method:: describe()

      Describe the batch prediction.

      :param batch_prediction_id: The unique identifier associated with the batch prediction.
      :type batch_prediction_id: str

      :returns: The batch prediction description.
      :rtype: BatchPrediction



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      Retrieves a list of versions of a given batch prediction

      :param limit: Number of versions to list.
      :type limit: int
      :param start_after_version: Version to start after.
      :type start_after_version: str

      :returns: List of batch prediction versions.
      :rtype: list[BatchPredictionVersion]



   .. py:method:: update(deployment_id = None, global_prediction_args = None, batch_prediction_args = None, explanations = None, output_format = None, csv_input_prefix = None, csv_prediction_prefix = None, csv_explanations_prefix = None, output_includes_metadata = None, result_input_columns = None, name = None)

      Update a batch prediction job description.

      :param deployment_id: Unique identifier of the deployment.
      :type deployment_id: str
      :param batch_prediction_args: Batch Prediction args specific to problem type.
      :type batch_prediction_args: BatchPredictionArgs
      :param output_format: If specified, sets the format of the batch prediction output (CSV or JSON).
      :type output_format: str
      :param csv_input_prefix: Prefix to prepend to the input columns, only applies when output format is CSV.
      :type csv_input_prefix: str
      :param csv_prediction_prefix: Prefix to prepend to the prediction columns, only applies when output format is CSV.
      :type csv_prediction_prefix: str
      :param csv_explanations_prefix: Prefix to prepend to the explanation columns, only applies when output format is CSV.
      :type csv_explanations_prefix: str
      :param output_includes_metadata: If True, output will contain columns including prediction start time, batch prediction version, and model version.
      :type output_includes_metadata: bool
      :param result_input_columns: If present, will limit result files or feature groups to only include columns present in this list.
      :type result_input_columns: list
      :param name: If present, will rename the batch prediction.
      :type name: str

      :returns: The batch prediction.
      :rtype: BatchPrediction



   .. py:method:: set_file_connector_output(output_format = None, output_location = None)

      Updates the file connector output configuration of the batch prediction

      :param output_format: The format of the batch prediction output (CSV or JSON). If not specified, the default format will be used.
      :type output_format: str
      :param output_location: The location to write the prediction results. If not specified, results will be stored in Abacus.AI.
      :type output_location: str

      :returns: The batch prediction description.
      :rtype: BatchPrediction



   .. py:method:: set_database_connector_output(database_connector_id = None, database_output_config = None)

      Updates the database connector output configuration of the batch prediction

      :param database_connector_id: Unique string identifier of an Database Connection to write predictions to.
      :type database_connector_id: str
      :param database_output_config: Key-value pair of columns/values to write to the database connector.
      :type database_output_config: dict

      :returns: Description of the batch prediction.
      :rtype: BatchPrediction



   .. py:method:: set_feature_group_output(table_name)

      Creates a feature group and sets it as the batch prediction output.

      :param table_name: Name of the feature group table to create.
      :type table_name: str

      :returns: Batch prediction after the output has been applied.
      :rtype: BatchPrediction



   .. py:method:: set_output_to_console()

      Sets the batch prediction output to the console, clearing both the file connector and database connector configurations.

      :param batch_prediction_id: The unique identifier of the batch prediction.
      :type batch_prediction_id: str

      :returns: The batch prediction description.
      :rtype: BatchPrediction



   .. py:method:: set_feature_group(feature_group_type, feature_group_id = None)

      Sets the batch prediction input feature group.

      :param feature_group_type: Enum string representing the feature group type to set. The type is based on the use case under which the feature group is being created (e.g. Catalog Attributes for personalized recommendation use case).
      :type feature_group_type: str
      :param feature_group_id: Unique identifier of the feature group to set as input to the batch prediction.
      :type feature_group_id: str

      :returns: Description of the batch prediction.
      :rtype: BatchPrediction



   .. py:method:: set_dataset_remap(dataset_id_remap)

      For the purposes of this batch prediction, swaps out datasets in the training feature groups

      :param dataset_id_remap: Key/value pairs of dataset ids to be replaced during the batch prediction.
      :type dataset_id_remap: dict

      :returns: Batch prediction object.
      :rtype: BatchPrediction
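
      A minimal sketch; both dataset IDs below are placeholders:

      .. code-block:: python

         # Replace the original training dataset with another one for this batch prediction only.
         bp = bp.set_dataset_remap({'original_dataset_id': 'replacement_dataset_id'})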



   .. py:method:: delete()

      Deletes a batch prediction and associated data, such as associated monitors.

      :param batch_prediction_id: Unique string identifier of the batch prediction.
      :type batch_prediction_id: str



   .. py:method:: wait_for_predictions(timeout=86400)

      A waiting call until batch predictions are ready.

      :param timeout: The maximum time, in seconds, to wait for the call to finish; if it does not finish within the allocated time, the call times out.
      :type timeout: int



   .. py:method:: wait_for_drift_monitor(timeout=86400)

      A waiting call until batch prediction drift monitor calculations are ready.

      :param timeout: The maximum time, in seconds, to wait for the call to finish; if it does not finish within the allocated time, the call times out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the latest batch prediction version.

      :returns: A string describing the status of the latest batch prediction version e.g., pending, complete, etc.
      :rtype: str
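
      A typical polling sketch using the methods above (``bp`` is an assumed BatchPrediction handle):

      .. code-block:: python

         bp.wait_for_predictions(timeout=3600)   # block for up to one hour
         print(bp.get_status())                  # e.g. a status string such as pending or complete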



   .. py:method:: create_refresh_policy(cron)

      Creates a refresh policy for a batch prediction.

      :param cron: A cron style string to set the refresh time.
      :type cron: str

      :returns: The refresh policy object.
      :rtype: RefreshPolicy
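
      For example, to refresh the batch prediction every day at 06:00 UTC (the cron string is illustrative):

      .. code-block:: python

         # Assumes `bp` is an existing BatchPrediction instance.
         policy = bp.create_refresh_policy(cron='0 6 * * *')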



   .. py:method:: list_refresh_policies()

      Gets the refresh policies as a list.

      :returns: A list of refresh policy objects.
      :rtype: List[RefreshPolicy]



   .. py:method:: describe_output_feature_group()

      Gets the results feature group for this batch prediction

      :returns: A feature group object.
      :rtype: FeatureGroup



   .. py:method:: load_results_as_pandas()

      Loads the output feature groups into a python pandas dataframe.

      :returns: A pandas dataframe with annotations and text_snippet columns.
      :rtype: DataFrame
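
      A short sketch combining the two methods above (``bp`` is an assumed BatchPrediction handle):

      .. code-block:: python

         output_fg = bp.describe_output_feature_group()   # FeatureGroup holding the results
         results_df = bp.load_results_as_pandas()         # the same results as a pandas DataFrame
         print(results_df.head())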



.. py:class:: BatchPredictionVersion(client, batchPredictionVersion=None, batchPredictionId=None, status=None, driftMonitorStatus=None, deploymentId=None, modelId=None, modelVersion=None, predictionsStartedAt=None, predictionsCompletedAt=None, databaseOutputError=None, totalPredictions=None, failedPredictions=None, databaseConnectorId=None, databaseOutputConfiguration=None, fileConnectorOutputLocation=None, fileOutputFormat=None, connectorType=None, legacyInputLocation=None, error=None, driftMonitorError=None, monitorWarnings=None, csvInputPrefix=None, csvPredictionPrefix=None, csvExplanationsPrefix=None, databaseOutputTotalWrites=None, databaseOutputFailedWrites=None, outputIncludesMetadata=None, resultInputColumns=None, modelMonitorVersion=None, algoName=None, algorithm=None, outputFeatureGroupId=None, outputFeatureGroupVersion=None, outputFeatureGroupTableName=None, batchPredictionWarnings=None, bpAcrossVersionsMonitorVersion=None, batchPredictionArgsType=None, batchInputs={}, inputFeatureGroups={}, globalPredictionArgs={}, batchPredictionArgs={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Batch Prediction Version

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param batchPredictionVersion: The unique identifier of the batch prediction version
   :type batchPredictionVersion: str
   :param batchPredictionId: The unique identifier of the batch prediction
   :type batchPredictionId: str
   :param status: The current status of the batch prediction
   :type status: str
   :param driftMonitorStatus: The status of the drift monitor for this batch prediction version
   :type driftMonitorStatus: str
   :param deploymentId: The deployment used to make the predictions
   :type deploymentId: str
   :param modelId: The model used to make the predictions
   :type modelId: str
   :param modelVersion: The model version used to make the predictions
   :type modelVersion: str
   :param predictionsStartedAt: Predictions start date and time
   :type predictionsStartedAt: str
   :param predictionsCompletedAt: Predictions completion date and time
   :type predictionsCompletedAt: str
   :param databaseOutputError: If true, there were errors reported by the database connector while writing
   :type databaseOutputError: bool
   :param totalPredictions: Number of predictions performed in this batch prediction job
   :type totalPredictions: int
   :param failedPredictions: Number of predictions that failed
   :type failedPredictions: int
   :param databaseConnectorId: The database connector to write the results to
   :type databaseConnectorId: str
   :param databaseOutputConfiguration: Contains information about where the batch predictions are written to
   :type databaseOutputConfiguration: dict
   :param fileConnectorOutputLocation: Contains information about where the batch predictions are written to
   :type fileConnectorOutputLocation: str
   :param fileOutputFormat: The format of the batch prediction output (CSV or JSON)
   :type fileOutputFormat: str
   :param connectorType: Null if writing to internal console, else FEATURE_GROUP | FILE_CONNECTOR | DATABASE_CONNECTOR
   :type connectorType: str
   :param legacyInputLocation: The location of the input data
   :type legacyInputLocation: str
   :param error: Relevant error if the status is FAILED
   :type error: str
   :param driftMonitorError: Error message for the drift monitor of this batch prediction
   :type driftMonitorError: str
   :param monitorWarnings: Relevant warning if there are issues found in drift or data integrity
   :type monitorWarnings: str
   :param csvInputPrefix: A prefix to prepend to the input columns, only applies when output format is CSV
   :type csvInputPrefix: str
   :param csvPredictionPrefix: A prefix to prepend to the prediction columns, only applies when output format is CSV
   :type csvPredictionPrefix: str
   :param csvExplanationsPrefix: A prefix to prepend to the explanation columns, only applies when output format is CSV
   :type csvExplanationsPrefix: str
   :param databaseOutputTotalWrites: The total number of rows attempted to write (may be less than total_predictions if write mode is UPSERT and multiple rows share the same ID)
   :type databaseOutputTotalWrites: int
   :param databaseOutputFailedWrites: The number of failed writes to the Database Connector
   :type databaseOutputFailedWrites: int
   :param outputIncludesMetadata: If true, output will contain columns including prediction start time, batch prediction version, and model version
   :type outputIncludesMetadata: bool
   :param resultInputColumns: If present, will limit result files or feature groups to only include columns present in this list
   :type resultInputColumns: list[str]
   :param modelMonitorVersion: The version of the model monitor
   :type modelMonitorVersion: str
   :param algoName: The name of the algorithm used to train the model
   :type algoName: str
   :param algorithm: The algorithm that is currently deployed.
   :type algorithm: str
   :param outputFeatureGroupId: The Batch Prediction output feature group ID if applicable
   :type outputFeatureGroupId: str
   :param outputFeatureGroupVersion: The Batch Prediction output feature group version if applicable
   :type outputFeatureGroupVersion: str
   :param outputFeatureGroupTableName: The Batch Prediction output feature group name if applicable
   :type outputFeatureGroupTableName: str
   :param batchPredictionWarnings: Relevant warnings if any issues are found
   :type batchPredictionWarnings: str
   :param bpAcrossVersionsMonitorVersion: The version of the batch prediction across versions monitor
   :type bpAcrossVersionsMonitorVersion: str
   :param batchPredictionArgsType: The type of the batch prediction args
   :type batchPredictionArgsType: str
   :param batchInputs: Inputs to the batch prediction
   :type batchInputs: PredictionInput
   :param inputFeatureGroups: List of prediction feature groups
   :type inputFeatureGroups: PredictionFeatureGroup
   :param globalPredictionArgs:
   :type globalPredictionArgs: BatchPredictionArgs
   :param batchPredictionArgs: Argument(s) passed to every prediction call
   :type batchPredictionArgs: BatchPredictionArgs


   .. py:attribute:: batch_prediction_version
      :value: None



   .. py:attribute:: batch_prediction_id
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: drift_monitor_status
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: predictions_started_at
      :value: None



   .. py:attribute:: predictions_completed_at
      :value: None



   .. py:attribute:: database_output_error
      :value: None



   .. py:attribute:: total_predictions
      :value: None



   .. py:attribute:: failed_predictions
      :value: None



   .. py:attribute:: database_connector_id
      :value: None



   .. py:attribute:: database_output_configuration
      :value: None



   .. py:attribute:: file_connector_output_location
      :value: None



   .. py:attribute:: file_output_format
      :value: None



   .. py:attribute:: connector_type
      :value: None



   .. py:attribute:: legacy_input_location
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: drift_monitor_error
      :value: None



   .. py:attribute:: monitor_warnings
      :value: None



   .. py:attribute:: csv_input_prefix
      :value: None



   .. py:attribute:: csv_prediction_prefix
      :value: None



   .. py:attribute:: csv_explanations_prefix
      :value: None



   .. py:attribute:: database_output_total_writes
      :value: None



   .. py:attribute:: database_output_failed_writes
      :value: None



   .. py:attribute:: output_includes_metadata
      :value: None



   .. py:attribute:: result_input_columns
      :value: None



   .. py:attribute:: model_monitor_version
      :value: None



   .. py:attribute:: algo_name
      :value: None



   .. py:attribute:: algorithm
      :value: None



   .. py:attribute:: output_feature_group_id
      :value: None



   .. py:attribute:: output_feature_group_version
      :value: None



   .. py:attribute:: output_feature_group_table_name
      :value: None



   .. py:attribute:: batch_prediction_warnings
      :value: None



   .. py:attribute:: bp_across_versions_monitor_version
      :value: None



   .. py:attribute:: batch_prediction_args_type
      :value: None



   .. py:attribute:: batch_inputs


   .. py:attribute:: input_feature_groups


   .. py:attribute:: global_prediction_args


   .. py:attribute:: batch_prediction_args


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: download_batch_prediction_result_chunk(offset = 0, chunk_size = 10485760)

      Returns a stream containing the batch prediction results.

      :param offset: The offset to read from.
      :type offset: int
      :param chunk_size: The maximum amount of data to read.
      :type chunk_size: int



   .. py:method:: get_batch_prediction_connector_errors()

      Returns a stream containing the batch prediction database connection write errors, if any writes failed for the specified batch prediction job.

      :param batch_prediction_version: Unique string identifier of the batch prediction job to get the errors for.
      :type batch_prediction_version: str



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: BatchPredictionVersion



   .. py:method:: describe()

      Describes a Batch Prediction Version.

      :param batch_prediction_version: Unique string identifier of the Batch Prediction Version.
      :type batch_prediction_version: str

      :returns: The Batch Prediction Version.
      :rtype: BatchPredictionVersion



   .. py:method:: get_logs()

      Retrieves the batch prediction logs.

      :param batch_prediction_version: The unique version ID of the batch prediction version.
      :type batch_prediction_version: str

      :returns: The logs for the specified batch prediction version.
      :rtype: BatchPredictionVersionLogs



   .. py:method:: download_result_to_file(file)

      Downloads the batch prediction version results to a local file.

      :param file: A file object opened in a binary mode e.g., file=open('/tmp/output', 'wb').
      :type file: file object
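
      A minimal sketch writing the results to a local file (the path is an assumption):

      .. code-block:: python

         # Assumes `bp_version` is an existing BatchPredictionVersion instance.
         with open('/tmp/output', 'wb') as f:
             bp_version.download_result_to_file(f)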



   .. py:method:: wait_for_predictions(timeout=86400)

      A waiting call until batch prediction version is ready.

      :param timeout: The maximum time, in seconds, to wait for the call to finish; if it does not finish within the allocated time, the call times out.
      :type timeout: int



   .. py:method:: wait_for_drift_monitor(timeout=86400)

      A waiting call until batch prediction drift monitor calculations are ready.

      :param timeout: The maximum time, in seconds, to wait for the call to finish; if it does not finish within the allocated time, the call times out.
      :type timeout: int



   .. py:method:: get_status(drift_monitor_status = False)

      Gets the status of the batch prediction version.

      :returns: A string describing the status of the batch prediction version, e.g., pending, complete, etc.
      :rtype: str



   .. py:method:: load_results_as_pandas()

      Loads the output feature groups into a python pandas dataframe.

      :returns: A pandas dataframe with annotations and text_snippet columns.
      :rtype: DataFrame



.. py:class:: BatchPredictionVersionLogs(client, logs=None, warnings=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs from batch prediction version.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param logs: List of logs from batch prediction version.
   :type logs: list[str]
   :param warnings: List of warnings from batch prediction version.
   :type warnings: list[str]


   .. py:attribute:: logs
      :value: None



   .. py:attribute:: warnings
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: BotInfo(client, externalApplicationId=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Information about an external application and LLM.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param externalApplicationId: The external application ID.
   :type externalApplicationId: str


   .. py:attribute:: external_application_id
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CategoricalRangeViolation(client, name=None, mostCommonValues=None, freqOutsideTrainingRange=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Summary of important range mismatches for a categorical feature discovered by a model monitoring instance

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: Name of feature.
   :type name: str
   :param mostCommonValues: List of the most common feature values in the prediction distribution that are not present in the training distribution.
   :type mostCommonValues: list[str]
   :param freqOutsideTrainingRange: Frequency of prediction rows outside training distribution for the specified feature.
   :type freqOutsideTrainingRange: float


   .. py:attribute:: name
      :value: None



   .. py:attribute:: most_common_values
      :value: None



   .. py:attribute:: freq_outside_training_range
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ChatMessage(client, role=None, text=None, timestamp=None, isUseful=None, feedback=None, docIds=None, hotkeyTitle=None, tasks=None, keywordArguments=None, computePointsUsed=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A single chat message with Abacus Chat.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param role: The role of the message sender
   :type role: str
   :param text: A list of text segments for the message
   :type text: list[dict]
   :param timestamp: The timestamp at which the message was sent
   :type timestamp: str
   :param isUseful: Whether this message was marked as useful or not
   :type isUseful: bool
   :param feedback: The feedback provided for the message
   :type feedback: str
   :param docIds: A list of IDs of the uploaded documents, if the message has any
   :type docIds: list[str]
   :param hotkeyTitle: The title of the hotkey prompt if the message has one
   :type hotkeyTitle: str
   :param tasks: The list of spawned tasks, if the message was broken down into smaller sub-tasks.
   :type tasks: list[str]
   :param keywordArguments: A dict of kwargs used to generate the response.
   :type keywordArguments: dict
   :param computePointsUsed: The number of compute points used for the message.
   :type computePointsUsed: int


   .. py:attribute:: role
      :value: None



   .. py:attribute:: text
      :value: None



   .. py:attribute:: timestamp
      :value: None



   .. py:attribute:: is_useful
      :value: None



   .. py:attribute:: feedback
      :value: None



   .. py:attribute:: doc_ids
      :value: None



   .. py:attribute:: hotkey_title
      :value: None



   .. py:attribute:: tasks
      :value: None



   .. py:attribute:: keyword_arguments
      :value: None



   .. py:attribute:: compute_points_used
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ChatSession(client, answer=None, chatSessionId=None, projectId=None, name=None, createdAt=None, status=None, aiBuildingInProgress=None, notification=None, whiteboard=None, chatHistory={}, nextAiBuildingTask={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A chat session with Abacus Data Science Co-pilot.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param answer: The response from the chatbot
   :type answer: str
   :param chatSessionId: The chat session id
   :type chatSessionId: str
   :param projectId: The project id associated with the chat session
   :type projectId: str
   :param name: The name of the chat session
   :type name: str
   :param createdAt: The timestamp at which the chat session was created
   :type createdAt: str
   :param status: The status of the chat session
   :type status: str
   :param aiBuildingInProgress: Whether the AI building is in progress or not
   :type aiBuildingInProgress: bool
   :param notification: A warn/info message about the chat session. For example, a suggestion to create a new session if the current one is too old
   :type notification: str
   :param whiteboard: A set of whiteboard notes associated with the chat session
   :type whiteboard: str
   :param chatHistory: The chat history for the conversation
   :type chatHistory: ChatMessage
   :param nextAiBuildingTask: The next AI building task for the chat session
   :type nextAiBuildingTask: AiBuildingTask


   .. py:attribute:: answer
      :value: None



   .. py:attribute:: chat_session_id
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: ai_building_in_progress
      :value: None



   .. py:attribute:: notification
      :value: None



   .. py:attribute:: whiteboard
      :value: None



   .. py:attribute:: chat_history


   .. py:attribute:: next_ai_building_task


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: get()

      Gets a chat session from Data Science Co-pilot.

      :param chat_session_id: Unique ID of the chat session.
      :type chat_session_id: str

      :returns: The chat session with Data Science Co-pilot
      :rtype: ChatSession



   .. py:method:: delete_chat_message(message_index)

      Deletes a message in a chat session and its associated response.

      :param message_index: The index of the chat message within the UI.
      :type message_index: int



   .. py:method:: export()

      Exports a chat session to an HTML file

      :param chat_session_id: Unique ID of the chat session.
      :type chat_session_id: str



   .. py:method:: rename(name)

      Renames a chat session with Data Science Co-pilot.

      :param name: The new name of the chat session.
      :type name: str



.. py:class:: ChatllmComputer(client, computerId=None, token=None, vncEndpoint=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   ChatLLMComputer

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param computerId: The computer id.
   :type computerId: int
   :param token: The token.
   :type token: str
   :param vncEndpoint: The VNC endpoint.
   :type vncEndpoint: str


   .. py:attribute:: computer_id
      :value: None



   .. py:attribute:: token
      :value: None



   .. py:attribute:: vnc_endpoint
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ChatllmProject(client, chatllmProjectId=None, name=None, description=None, customInstructions=None, createdAt=None, updatedAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   ChatLLM Project

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param chatllmProjectId: The ID of the chatllm project.
   :type chatllmProjectId: id
   :param name: The name of the chatllm project.
   :type name: str
   :param description: The description of the chatllm project.
   :type description: str
   :param customInstructions: The custom instructions of the chatllm project.
   :type customInstructions: str
   :param createdAt: The creation time of the chatllm project.
   :type createdAt: str
   :param updatedAt: The update time of the chatllm project.
   :type updatedAt: str


   .. py:attribute:: chatllm_project_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: custom_instructions
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ChatllmReferralInvite(client, userAlreadyExists=None, successfulInvites=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The response of the Chatllm Referral Invite for different emails

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param userAlreadyExists: List of user emails that were not successfully invited because they are already registered users.
   :type userAlreadyExists: list
   :param successfulInvites: List of users successfully invited.
   :type successfulInvites: list


   .. py:attribute:: user_already_exists
      :value: None



   .. py:attribute:: successful_invites
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ChatllmTask(client, chatllmTaskId=None, name=None, instructions=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, enableEmailAlerts=None, email=None, numUnreadTaskInstances=None, computePointsUsed=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A chatllm task

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param chatllmTaskId: The id of the chatllm task.
   :type chatllmTaskId: str
   :param name: The name of the chatllm task.
   :type name: str
   :param instructions: The instructions of the chatllm task.
   :type instructions: str
   :param lifecycle: The lifecycle of the chatllm task.
   :type lifecycle: str
   :param scheduleInfo: The schedule info of the chatllm task.
   :type scheduleInfo: dict
   :param externalApplicationId: The external application id associated with the chatllm task.
   :type externalApplicationId: str
   :param deploymentConversationId: The deployment conversation id associated with the chatllm task.
   :type deploymentConversationId: str
   :param enableEmailAlerts: Whether email alerts are enabled for the chatllm task.
   :type enableEmailAlerts: bool
   :param email: The email to send alerts to.
   :type email: str
   :param numUnreadTaskInstances: The number of unread task instances for the chatllm task.
   :type numUnreadTaskInstances: int
   :param computePointsUsed: The compute points used for the chatllm task.
   :type computePointsUsed: int


   .. py:attribute:: chatllm_task_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: instructions
      :value: None



   .. py:attribute:: lifecycle
      :value: None



   .. py:attribute:: schedule_info
      :value: None



   .. py:attribute:: external_application_id
      :value: None



   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: enable_email_alerts
      :value: None



   .. py:attribute:: email
      :value: None



   .. py:attribute:: num_unread_task_instances
      :value: None



   .. py:attribute:: compute_points_used
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: AgentResponse(*args, **kwargs)

   Response object for agents, supporting attachments, section data, and normal data


   .. py:attribute:: data_list
      :value: []



   .. py:attribute:: section_data_list
      :value: []



   .. py:method:: __getstate__()

      Return state values to be pickled.



   .. py:method:: __setstate__(state)

      Restore state from the unpickled state values.



   .. py:method:: to_dict()

      Get a dict representation of the response object



   .. py:method:: __getattr__(item)


.. py:class:: ApiClient(api_key = None, server = None, client_options = None, skip_version_check = False, include_tb = False)

   Bases: :py:obj:`ReadOnlyClient`


   Abacus.AI API Client

   :param api_key: The api key to use as authentication to the server
   :type api_key: str
   :param server: The base server url to use to send API requests to
   :type server: str
   :param client_options: Optional API client configurations
   :type client_options: ClientOptions
   :param skip_version_check: If true, will skip checking the server's current API version on initializing the client
   :type skip_version_check: bool


   .. py:method:: create_dataset_from_pandas(feature_group_table_name, df, clean_column_names = False)

      [Deprecated]
      Creates a Dataset from a pandas dataframe

      :param feature_group_table_name: The table name to assign to the feature group created by this call
      :type feature_group_table_name: str
      :param df: The dataframe to upload
      :type df: pandas.DataFrame
      :param clean_column_names: If true, the dataframe's column names will be automatically cleaned to be compliant with Abacus.AI's column requirements. Otherwise it will raise a ValueError.
      :type clean_column_names: bool

      :returns: The dataset object created
      :rtype: Dataset



   .. py:method:: get_assignments_online_with_new_inputs(deployment_token, deployment_id, assignments_df = None, constraints_df = None, constraint_equations_df = None, feature_mapping_dict = None, solve_time_limit_seconds = None)

      Get alternative positive assignments for a given query. Optimal assignments are ignored and the alternative assignments are returned instead.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: ID
      :param assignments_df: A dataframe with all the variables involved in the optimization problem
      :type assignments_df: pd.DataFrame
      :param constraints_df: A dataframe of individual constraints, and variables in them
      :type constraints_df: pd.DataFrame
      :param constraint_equations_df: A dataframe which tells us about the operator / constant / penalty etc of a constraint
                                      This gives us some data which is needed to make sense of the constraints_df.
      :type constraint_equations_df: pd.DataFrame
      :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
      :type solve_time_limit_seconds: float

      :returns: The assignments for a given query.
      :rtype: OptimizationAssignment



   .. py:method:: create_dataset_version_from_pandas(table_name_or_id, df, clean_column_names = False)

      [Deprecated]
      Updates an existing dataset from a pandas dataframe

      :param table_name_or_id: The table name of the feature group or the ID of the dataset to update
      :type table_name_or_id: str
      :param df: The dataframe to upload
      :type df: pandas.DataFrame
      :param clean_column_names: If true, the dataframe's column names will be automatically cleaned to be compliant with Abacus.AI's column requirements. Otherwise it will raise a ValueError.
      :type clean_column_names: bool

      :returns: The dataset updated
      :rtype: Dataset



   .. py:method:: create_feature_group_from_pandas_df(table_name, df, clean_column_names = False)

      Create a Feature Group from a local Pandas DataFrame.

      :param table_name: The table name to assign to the feature group created by this call
      :type table_name: str
      :param df: The dataframe to upload and use as the data source for the feature group
      :type df: pandas.DataFrame
      :param clean_column_names: If true, the dataframe's column names will be automatically cleaned to be compliant with Abacus.AI's column requirements. Otherwise it will raise a ValueError.
      :type clean_column_names: bool
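
      A minimal sketch (the API key, table name, and data below are placeholders):

      .. code-block:: python

         import pandas as pd
         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')
         df = pd.DataFrame({'user id': [1, 2], 'score': [0.9, 0.1]})
         # clean_column_names=True renames columns such as 'user id' to comply with Abacus.AI requirements.
         client.create_feature_group_from_pandas_df('example_scores', df, clean_column_names=True)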



   .. py:method:: update_feature_group_from_pandas_df(table_name, df, clean_column_names = False)

      Updates a DATASET Feature Group from a local Pandas DataFrame.

      :param table_name: The table name of the existing feature group to update. A feature group with this name must exist and must have source type DATASET.
      :type table_name: str
      :param df: The dataframe to upload
      :type df: pandas.DataFrame
      :param clean_column_names: If true, the dataframe's column names will be automatically cleaned to be compliant with Abacus.AI's column requirements. Otherwise it will raise a ValueError.
      :type clean_column_names: bool



   .. py:method:: create_feature_group_from_spark_df(table_name, df)

      Create a Feature Group from a local Spark DataFrame.

      :param df: The dataframe to upload
      :type df: pyspark.sql.DataFrame
      :param table_name: The table name to assign to the feature group created by this call
      :type table_name: str



   .. py:method:: update_feature_group_from_spark_df(table_name, df)

      Updates a Feature Group from a local Spark DataFrame.

      :param df: The dataframe to upload
      :type df: pyspark.sql.DataFrame
      :param table_name: The table name to assign to the feature group created by this call
      :type table_name: str
      :param should_wait_for_upload: Wait for dataframe to upload before returning. Some FeatureGroup methods, like materialization, may not work until upload is complete.
      :type should_wait_for_upload: bool
      :param timeout: If waiting for upload, time out after this limit.
      :type timeout: int



   .. py:method:: create_spark_df_from_feature_group_version(session, feature_group_version)

      Create a Spark Dataframe in the provided Spark Session context, for a materialized Abacus Feature Group Version.

      :param session: Spark session
      :type session: pyspark.sql.SparkSession
      :param feature_group_version: Feature group version to load from
      :type feature_group_version: str

      :returns: pyspark.sql.DataFrame
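
      A minimal sketch, assuming ``client`` is an authenticated ApiClient, a local Spark session, and a placeholder feature group version ID:

      .. code-block:: python

         from pyspark.sql import SparkSession

         spark = SparkSession.builder.getOrCreate()
         df = client.create_spark_df_from_feature_group_version(spark, 'example_feature_group_version_id')
         df.show(5)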



   .. py:method:: create_prediction_operator_from_functions(name, project_id, predict_function = None, initialize_function = None, feature_group_ids = None, cpu_size = None, memory = None, included_modules = None, package_requirements = None, use_gpu = False)

      Create a new prediction operator.

      :param name: Name of the prediction operator.
      :type name: str
      :param project_id: The unique ID of the associated project.
      :type project_id: str
      :param predict_function: The function that will be executed to run predictions.
      :type predict_function: callable
      :param initialize_function: The initialization function that can generate anything used by predictions, based on input feature groups.
      :type initialize_function: callable
      :param feature_group_ids: List of feature groups that are supplied to the initialize function as parameters. Each of these parameters is a materialized DataFrame.
      :type feature_group_ids: list
      :param cpu_size: Size of the CPU for the prediction operator.
      :type cpu_size: str
      :param memory: Memory (in GB) for the  prediction operator.
      :type memory: int
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: list
      :param use_gpu: Whether this prediction operator needs a GPU.
      :type use_gpu: bool

      :returns: The created prediction operator object.
      :rtype: PredictionOperator



   .. py:method:: update_prediction_operator_from_functions(prediction_operator_id, name = None, predict_function = None, initialize_function = None, feature_group_ids = None, cpu_size = None, memory = None, included_modules = None, package_requirements = None, use_gpu = False)

      Update an existing prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str
      :param name: The name of the prediction operator
      :type name: str
      :param predict_function: The predict function callable to serialize and upload
      :type predict_function: callable
      :param initialize_function: The initialize function callable to serialize and upload
      :type initialize_function: callable
      :param feature_group_ids: List of feature groups that are supplied to the initialize function as parameters. Each of these parameters is a materialized DataFrame. The order should match the initialize function's parameters.
      :type feature_group_ids: list
      :param cpu_size: Size of the cpu for the training function
      :type cpu_size: str
      :param memory: Memory (in GB) for the training function
      :type memory: int
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param use_gpu: Whether this prediction needs gpu
      :type use_gpu: bool



   .. py:method:: create_model_from_functions(project_id, train_function, predict_function = None, training_input_tables = None, predict_many_function = None, initialize_function = None, cpu_size = None, memory = None, training_config = None, exclusive_run = False, included_modules = None, package_requirements = None, name = None, use_gpu = False, is_thread_safe = None)

      Creates a model from a python function

      :param project_id: The project to create the model in
      :type project_id: str
      :param train_function: The training function callable to serialize and upload
      :type train_function: callable
      :param predict_function: The predict function callable to serialize and upload
      :type predict_function: callable
      :param predict_many_function: The predict many function callable to serialize and upload
      :type predict_many_function: callable
      :param initialize_function: The initialize function callable to serialize and upload
      :type initialize_function: callable
      :param training_input_tables: The input table names of the feature groups to pass to the train function
      :type training_input_tables: list
      :param cpu_size: Size of the cpu for the training function
      :type cpu_size: str
      :param memory: Memory (in GB) for the training function
      :type memory: int
      :param training_config: Training configuration
      :type training_config: TrainingConfig
      :param exclusive_run: Decides if this model will be run exclusively or along with other Abacus.AI algorithms
      :type exclusive_run: bool
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list
      :param name: The name of the model
      :type name: str
      :param use_gpu: Whether this model needs gpu
      :type use_gpu: bool
      :param is_thread_safe: Whether the model is thread safe
      :type is_thread_safe: bool
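
      A minimal sketch of registering a custom model (all names and IDs are placeholders, and the exact signatures expected for the train and predict functions are assumptions):

      .. code-block:: python

         def train(training_df):
             # Train and return any picklable model object; here a trivial column mean.
             return {'mean': training_df['target'].mean()}

         def predict(model, query_df):
             query_df['prediction'] = model['mean']
             return query_df

         # Assumes `client` is an authenticated ApiClient; return value assumed to be the created model handle.
         model = client.create_model_from_functions(
             project_id='example_project_id',
             train_function=train,
             predict_function=predict,
             training_input_tables=['example_training_table'],
         )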



   .. py:method:: update_model_from_functions(model_id, train_function, predict_function = None, predict_many_function = None, initialize_function = None, training_input_tables = None, cpu_size = None, memory = None, included_modules = None, package_requirements = None, use_gpu = False, is_thread_safe = None)

      Updates a model from a python function. Please pass in all the functions, even if you don't update them.

      :param model_id: The id of the model to update
      :type model_id: str
      :param train_function: The training function callable to serialize and upload
      :type train_function: callable
      :param predict_function: The predict function callable to serialize and upload
      :type predict_function: callable
      :param predict_many_function: The predict many function callable to serialize and upload
      :type predict_many_function: callable
      :param initialize_function: The initialize function callable to serialize and upload
      :type initialize_function: callable
      :param training_input_tables: The input table names of the feature groups to pass to the train function
      :type training_input_tables: list
      :param cpu_size: Size of the cpu for the training function
      :type cpu_size: str
      :param memory: Memory (in GB) for the training function
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list
      :param use_gpu: Whether this model needs gpu
      :type use_gpu: bool
      :param is_thread_safe: Whether the model is thread safe
      :type is_thread_safe: bool



   .. py:method:: create_pipeline_step_from_function(pipeline_id, step_name, function, step_input_mappings = None, output_variable_mappings = None, step_dependencies = None, package_requirements = None, cpu_size = None, memory = None, included_modules = None, timeout = None)

      Creates a step in a given pipeline from a python function.

      :param pipeline_id: The ID of the pipeline to add the step to.
      :type pipeline_id: str
      :param step_name: The name of the step.
      :type step_name: str
      :param function: The python function.
      :type function: callable
      :param step_input_mappings: List of Python function arguments.
      :type step_input_mappings: List[PythonFunctionArguments]
      :param output_variable_mappings: List of Python function outputs.
      :type output_variable_mappings: List[OutputVariableMapping]
      :param step_dependencies: List of step names this step depends on.
      :type step_dependencies: List[str]
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param cpu_size: Size of the CPU for the step function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the step function.
      :type memory: int
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list
      :param timeout: Timeout for how long the step can run in minutes, default is 300 minutes.
      :type timeout: int



   .. py:method:: update_pipeline_step_from_function(pipeline_step_id, function, step_input_mappings = None, output_variable_mappings = None, step_dependencies = None, package_requirements = None, cpu_size = None, memory = None, included_modules = None, timeout = None)

      Updates a pipeline step from a python function.

      :param pipeline_step_id: The ID of the pipeline_step to update.
      :type pipeline_step_id: str
      :param function: The python function.
      :type function: callable
      :param step_input_mappings: List of Python function arguments.
      :type step_input_mappings: List[PythonFunctionArguments]
      :param output_variable_mappings: List of Python function outputs.
      :type output_variable_mappings: List[OutputVariableMapping]
      :param step_dependencies: List of step names this step depends on.
      :type step_dependencies: List[str]
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param cpu_size: Size of the CPU for the step function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the step function.
      :type memory: int
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list
      :param timeout: Timeout for the step in minutes, default is 300 minutes.
      :type timeout: int



   .. py:method:: create_python_function_from_function(name, function, function_variable_mappings = None, package_requirements = None, function_type = PythonFunctionType.FEATURE_GROUP.value)

      Creates a custom Python function

      :param name: The name to identify the Python function.
      :type name: str
      :param function: The function callable to serialize and upload.
      :type function: callable
      :param function_variable_mappings: List of Python function arguments.
      :type function_variable_mappings: List[PythonFunctionArguments]
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: List
      :param function_type: Type of Python function to create. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
      :type function_type: PythonFunctionType
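
      A minimal sketch registering a feature-group function (the function body and name are placeholders):

      .. code-block:: python

         def transform(input_df):
             # Any transformation that returns a DataFrame.
             return input_df.dropna()

         # Assumes `client` is an authenticated ApiClient.
         client.create_python_function_from_function(
             name='example_transform',
             function=transform,
         )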



   .. py:method:: create_feature_group_from_python_function(function, table_name, input_tables = None, python_function_name = None, python_function_bindings = None, cpu_size = None, memory = None, package_requirements = None, included_modules = None)

      Creates a feature group from a python function

      :param function: The function callable for the feature group
      :type function: callable
      :param table_name: The table name to give the feature group
      :type table_name: str
      :param input_tables: The input table names of the feature groups as input to the feature group function
      :type input_tables: list
      :param python_function_name: The name of the python function to create a feature group from.
      :type python_function_name: str
      :param python_function_bindings: List of python function arguments
      :type python_function_bindings: List[PythonFunctionArguments]
      :param cpu_size: Size of the cpu for the feature group function
      :type cpu_size: str
      :param memory: Memory (in GB) for the feature group function
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list



   .. py:method:: update_python_function_code(name, function = None, function_variable_mappings = None, package_requirements = None, included_modules = None)

      Update custom python function with user inputs for the given python function.

      :param name: The unique name to identify the python function in an organization.
      :type name: String
      :param function: The function callable to serialize and upload.
      :type function: callable
      :param function_variable_mappings: List of python function arguments
      :type function_variable_mappings: List[PythonFunctionArguments]
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list

      :returns: The python_function object.
      :rtype: PythonFunction



   .. py:method:: create_algorithm_from_function(name, problem_type, training_data_parameter_names_mapping = None, training_config_parameter_name = None, train_function = None, predict_function = None, predict_many_function = None, initialize_function = None, common_functions = None, config_options = None, is_default_enabled = False, project_id = None, use_gpu = False, package_requirements = None, included_modules = None)

      Create a new algorithm, or update existing algorithm if the name already exists

      :param name: The name to identify the algorithm, only uppercase letters, numbers and underscore allowed
      :type name: String
      :param problem_type: The type of the problem this algorithm will work on
      :type problem_type: str
      :param train_function: The training function callable to serialize and upload
      :type train_function: callable
      :param predict_function: The predict function callable to serialize and upload
      :type predict_function: callable
      :param predict_many_function: The predict many function callable to serialize and upload
      :type predict_many_function: callable
      :param initialize_function: The initialize function callable to serialize and upload
      :type initialize_function: callable
      :param common_functions: A list of functions that will be used by both train and predict functions, e.g. some data processing utilities
      :type common_functions: List of callables
      :param training_data_parameter_names_mapping: The mapping from feature group types to training data parameter names in the train function
      :type training_data_parameter_names_mapping: Dict
      :param training_config_parameter_name: The train config parameter name in the train function
      :type training_config_parameter_name: string
      :param config_options: Map dataset types and configs to train function parameter names
      :type config_options: Dict
      :param is_default_enabled: Whether to train with the algorithm by default
      :type is_default_enabled: bool
      :param project_id: The unique ID of the project
      :type project_id: Unique String Identifier
      :param use_gpu: Whether this algorithm needs to run on GPU
      :type use_gpu: Boolean
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list



   .. py:method:: update_algorithm_from_function(algorithm, training_data_parameter_names_mapping = None, training_config_parameter_name = None, train_function = None, predict_function = None, predict_many_function = None, initialize_function = None, common_functions = None, config_options = None, is_default_enabled = None, use_gpu = None, package_requirements = None, included_modules = None)

      Create a new algorithm, or update existing algorithm if the name already exists

      :param algorithm: The name to identify the algorithm, only uppercase letters, numbers and underscore allowed
      :type algorithm: String
      :param train_function: The training function callable to serialize and upload
      :type train_function: callable
      :param predict_function: The predict function callable to serialize and upload
      :type predict_function: callable
      :param predict_many_function: The predict many function callable to serialize and upload
      :type predict_many_function: callable
      :param initialize_function: The initialize function callable to serialize and upload
      :type initialize_function: callable
      :param common_functions: A list of functions that will be used by both train and predict functions, e.g. some data processing utilities
      :type common_functions: List of callables
      :param training_data_parameter_names_mapping: The mapping from feature group types to training data parameter names in the train function
      :type training_data_parameter_names_mapping: Dict
      :param training_config_parameter_name: The train config parameter name in the train function
      :type training_config_parameter_name: string
      :param config_options: Map dataset types and configs to train function parameter names
      :type config_options: Dict
      :param is_default_enabled: Whether to train with the algorithm by default
      :type is_default_enabled: Boolean
      :param use_gpu: Whether this algorithm needs to run on GPU
      :type use_gpu: Boolean
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param included_modules: List of names of user-created modules that will be included, which is equivalent to 'from module import *'
      :type included_modules: list



   .. py:method:: get_train_function_input(project_id, training_table_names = None, training_data_parameter_name_override = None, training_config_parameter_name_override = None, training_config = None, custom_algorithm_config = None)

      Get the input data for the train function to test locally.

      :param project_id: The id of the project
      :type project_id: String
      :param training_table_names: A list of feature group tables used for training
      :type training_table_names: List
      :param training_data_parameter_name_override: The mapping from feature group types to training data parameter names in the train function
      :type training_data_parameter_name_override: Dict
      :param training_config_parameter_name_override: The train config parameter name in the train function
      :type training_config_parameter_name_override: String
      :param training_config: A dictionary for Abacus.AI defined training options and values
      :type training_config: Dict
      :param custom_algorithm_config: User-defined config that can be serialized by JSON
      :type custom_algorithm_config: Any

      :returns: A dictionary that maps train function parameter names to their values.



   .. py:method:: get_train_function_input_from_model_version(model_version, algorithm = None, training_config = None, custom_algorithm_config = None)

      Get the input data for the train function to test locally, based on a trained model version.

      :param model_version: The string identifier of the model version
      :type model_version: String
      :param algorithm: The particular algorithm's name, whose train function to test with
      :type algorithm: String
      :param training_config: A dictionary for Abacus.AI defined training options and values
      :type training_config: Dict
      :param custom_algorithm_config: User-defined config that can be serialized by JSON
      :type custom_algorithm_config: Any

      :returns: A dictionary that maps train function parameter names to their values.



   .. py:method:: create_custom_loss_function(name, loss_function_type, loss_function)

      Registers a new custom loss function which can be used as an objective function during model training.

      :param name: A name for the loss. Should be unique per organization. Limit - 50 chars. Only underscores, numbers, and uppercase letters are allowed
      :type name: String
      :param loss_function_type: The category of problems that this loss would be applicable to. Ex - REGRESSION_DL_TF, CLASSIFICATION_DL_TF, etc.
      :type loss_function_type: String
      :param loss_function: A python functor which can take required arguments (Ex - (y_true, y_pred)) and returns loss value(s) (Ex - An array of loss values of size batch size)
      :type loss_function: Callable

      :returns: A description of the registered custom loss function
      :rtype: CustomLossFunction

      :raises InvalidParameterError: If either loss function name or type or the passed function is invalid/incompatible
      :raises AlreadyExistsError: If the loss function with the same name already exists in the organization
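
      A minimal sketch (the loss body is illustrative; the tensor framework assumed here follows the ``*_DL_TF`` loss type):

      .. code-block:: python

         def custom_mse(y_true, y_pred):
             # Example squared-error loss; assumes TensorFlow tensors for a *_DL_TF loss type.
             import tensorflow as tf
             return tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)

         # Assumes `client` is an authenticated ApiClient.
         loss = client.create_custom_loss_function(
             name='EXAMPLE_MSE_LOSS',
             loss_function_type='REGRESSION_DL_TF',
             loss_function=custom_mse,
         )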



   .. py:method:: update_custom_loss_function(name, loss_function)

      Updates a previously registered custom loss function with a new function implementation.

      :param name: name of the registered custom loss.
      :type name: String
      :param loss_function: A python functor which can take required arguments (Ex - (y_true, y_pred)) and returns loss value(s) (Ex - An array of loss values of size batch size)
      :type loss_function: Callable

      :returns: A description of the updated custom loss function
      :rtype: CustomLossFunction

      :raises InvalidParameterError: If either loss function name or type or the passed function is invalid/incompatible
      :raises DataNotFoundError: If a loss function with given name is not found in the organization



   .. py:method:: create_custom_metric_from_function(name, problem_type, custom_metric_function)

      Registers a new custom metric which can be used as an evaluation metric for the trained model.

      :param name: A name for the metric. Should be unique per organization. Limit - 50 chars. Only underscores, numbers, and uppercase letters are allowed.
      :type name: String
      :param problem_type: The problem type that this metric would be applicable to. e.g. - REGRESSION, FORECASTING, etc.
      :type problem_type: String
      :param custom_metric_function: A python functor which can take required arguments e.g. (y_true, y_pred) and returns the metric value.
      :type custom_metric_function: Callable

      :returns: The newly created custom metric.
      :rtype: CustomMetric

      :raises InvalidParameterError: If either custom metric name or type or the passed function is invalid/incompatible.
      :raises AlreadyExistsError: If a custom metric with given name already exists in the organization.
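
      A minimal sketch (hedged; the metric name is hypothetical and the functor is illustrative):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         def mean_absolute_error(y_true, y_pred):
             # Illustrative metric functor returning a single scalar value.
             return sum(abs(t - p) for t, p in zip(y_true, y_pred)) / len(y_true)

         metric = client.create_custom_metric_from_function(
             name='MAE_CUSTOM_1',          # uppercase letters, digits, underscores only
             problem_type='REGRESSION',
             custom_metric_function=mean_absolute_error,
         )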



   .. py:method:: update_custom_metric_from_function(name, custom_metric_function)

      Updates a previously registered custom metric.

      :param name: A name for the metric. Should be unique per organization. Limit - 50 chars. Only underscores, numbers, and uppercase letters are allowed.
      :type name: String
      :param custom_metric_function: A python functor which can take required arguments e.g. (y_true, y_pred) and returns the metric value.
      :type custom_metric_function: Callable

      :returns: The updated custom metric.
      :rtype: CustomMetric

      :raises InvalidParameterError: If either custom metric name or type or the passed function is invalid/incompatible.
      :raises DataNotFoundError: If a custom metric with given name is not found in the organization.



   .. py:method:: create_module_from_notebook(file_path, name)

      Create a module with the code marked in the notebook. Use '#module_start#' to mark the starting code cell and '#module_end#' for the
      ending code cell.

      :param file_path: Notebook's relative path to the root directory, e.g. 'n1.ipynb'
      :type file_path: String
      :param name: Name of the module to create.
      :type name: String

      :returns: the created Abacus.ai module object
      :rtype: Module



   .. py:method:: update_module_from_notebook(file_path, name)

      Update the module with the code marked in the notebook. Use '#module_start#' to mark the starting code cell and '#module_end#' for the
      ending code cell.

      :param file_path: Notebook's relative path to the root directory, e.g. 'n1.ipynb'
      :type file_path: String
      :param name: Name of the module to update.
      :type name: String

      :returns: the updated Abacus.ai module object
      :rtype: Module



   .. py:method:: import_module(name)

      Import a previously created module. The module will be reloaded if it has already been imported.
      This is equivalent to importing from that module's file.

      :param name: Name of the module to import.
      :type name: String

      :returns: the imported python module
      :rtype: module
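
      A minimal sketch tying the notebook-module workflow together (hedged; the notebook path and module name are hypothetical):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # In 'utils.ipynb' (hypothetical), the first exported cell starts with
         # '#module_start#' and the last exported cell ends with '#module_end#'.
         client.create_module_from_notebook(file_path='utils.ipynb', name='utils')

         # Later, import the registered module by name and use it like a normal module.
         utils = client.import_module('utils')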



   .. py:method:: run_workflow_graph(workflow_graph, sample_user_inputs = {}, agent_workflow_node_id = None, agent_interface = None, package_requirements = None)

      Validates the workflow graph by running the flow using sample user inputs for an AI Agent.

      :param workflow_graph: The workflow graph to validate.
      :type workflow_graph: WorkflowGraph
      :param sample_user_inputs: Contains sample values for variables of type user_input for the starting node
      :type sample_user_inputs: dict
      :param agent_workflow_node_id: Node ID from which to run the workflow
      :type agent_workflow_node_id: str
      :param agent_interface: The interface that the agent will be deployed with.
      :type agent_interface: AgentInterface
      :param package_requirements: A list of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list

      :returns: The output variables for every node in the workflow that has executed.
      :rtype: dict



   .. py:method:: execute_workflow_node(node, inputs)

      Execute the workflow node given input arguments.

      :param node: The workflow node to be executed.
      :type node: WorkflowGraphNode
      :param inputs: The inputs to be passed to the node function.
      :type inputs: dict

      :returns: The outputs returned by node execution.
      :rtype: dict



   .. py:method:: create_agent_from_function(project_id, agent_function, name = None, memory = None, package_requirements = None, description = None, evaluation_feature_group_id = None, workflow_graph = None)

      [Deprecated]
      Creates the agent from a python function

      :param project_id: The project to create the model in
      :type project_id: str
      :param agent_function: The agent function callable to serialize and upload
      :type agent_function: callable
      :param name: The name of the agent
      :type name: str
      :param memory: Memory (in GB) for hosting the agent
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param description: A description of the agent.
      :type description: str
      :param evaluation_feature_group_id: The ID of the feature group to use for evaluation.
      :type evaluation_feature_group_id: str
      :param workflow_graph: The workflow graph for the agent.
      :type workflow_graph: WorkflowGraph



   .. py:method:: update_agent_with_function(model_id, agent_function, memory = None, package_requirements = None, enable_binary_input = None, description = None, workflow_graph = None)

      [Deprecated]
      Updates the agent with a new agent function.

      :param model_id: The unique ID associated with the AI Agent to be changed.
      :type model_id: str
      :param agent_function: The new agent function callable to serialize and upload
      :type agent_function: callable
      :param memory: Memory (in GB) for hosting the agent
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: List
      :param enable_binary_input: If True, the agent will be able to accept binary data as inputs.
      :type enable_binary_input: bool
      :param description: A description of the agent.
      :type description: str
      :param workflow_graph: The workflow graph for the agent.
      :type workflow_graph: WorkflowGraph



   .. py:method:: _attempt_deployment_sql_execution(sql)


   .. py:method:: execute_feature_group_sql(sql, fix_query_on_error = False, timeout=3600, delay=2, use_latest_version=True)

      Execute a SQL query on the feature groups

      :param sql: The SQL query to execute.
      :type sql: str
      :param fix_query_on_error: If enabled, SQL query is auto fixed if parsing fails.
      :type fix_query_on_error: bool
      :param use_latest_version: If enabled, executes the query on the latest version of the feature group; if no version exists, a FailedDependencyError is raised. If disabled, the query is executed against the latest feature group state, irrespective of the latest version of the feature group. Defaults to True
      :type use_latest_version: bool

      :returns: The result of the query.
      :rtype: pandas.DataFrame
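
      A minimal sketch (hedged; 'orders_fg' is a hypothetical feature group table):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # The result is returned as a pandas DataFrame.
         df = client.execute_feature_group_sql(
             sql='SELECT user_id, COUNT(*) AS order_count FROM orders_fg GROUP BY user_id',
             fix_query_on_error=True,
         )
         print(df.head())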



   .. py:method:: _get_agent_client_type()

      Returns the client type for the current request context.

      :returns: The client type for the current request context.
      :rtype: AgentClientType



   .. py:method:: get_agent_context_chat_history()

      Gets a history of chat messages from the current request context. Applicable within an AIAgent
      execute function.

      :returns: The chat history for the current request being processed by the Agent.
      :rtype: List[AgentChatMessage]



   .. py:method:: set_agent_context_chat_history(chat_history)

      Sets the history of chat messages for the current request context.

      :param chat_history: The chat history associated with the current request context.
      :type chat_history: List[AgentChatMessage]



   .. py:method:: get_agent_context_chat_history_for_llm()

      Gets a history of chat messages from the current request context. Applicable within an AIAgent
      execute function.

      :returns: The messages in format suitable for llm.
      :rtype: AgentConversation



   .. py:method:: get_agent_context_conversation_id()

      Gets the deployment conversation ID from the current request context. Applicable within an AIAgent
      execute function.

      :returns: The deployment conversation ID for the current request being processed by the Agent.
      :rtype: str



   .. py:method:: set_agent_context_conversation_id(conversation_id)

      Sets the deployment conversation ID for the current request context.

      :param conversation_id: The deployment conversation ID for the current request being processed by the Agent.
      :type conversation_id: str



   .. py:method:: get_agent_context_external_session_id()

      Gets the external session ID from the current request context if it has been set with the request.
      Applicable within an AIAgent execute function.

      :returns: The external session ID for the current request being processed by the Agent.
      :rtype: str



   .. py:method:: set_agent_context_external_session_id(external_session_id)

      Sets the external session ID for the current request context.

      :param external_session_id: The external session ID for the current request being processed by the Agent.
      :type external_session_id: str



   .. py:method:: get_agent_context_doc_ids()

      Gets the document IDs from the current request context if documents have been uploaded with the request.
      Applicable within an AIAgent execute function.

      :returns: The document IDs for the current request being processed by the Agent.
      :rtype: List[str]



   .. py:method:: set_agent_context_doc_ids(doc_ids)

      Sets the doc_ids in the current request context.

      :param doc_ids: The doc_ids associated with the current request context.
      :type doc_ids: List[str]



   .. py:method:: get_agent_context_doc_infos()

      Gets the document information from the current request context if documents have been uploaded with the request.
      Applicable within an AIAgent execute function.

      :returns: The document information for the current request being processed by the Agent.
      :rtype: List[dict]



   .. py:method:: set_agent_context_doc_infos(doc_infos)

      Sets the doc_infos in the current request context.

      :param doc_infos: The document information associated with the current request context.
      :type doc_infos: List[dict]



   .. py:method:: get_agent_context_blob_inputs()

      Gets the BlobInputs from the current request context if documents have been uploaded with the request.
      Applicable within an AIAgent execute function.

      :returns: The BlobInputs for the current request being processed by the Agent.
      :rtype: List[BlobInput]



   .. py:method:: get_agent_context_user_info()

      Gets information about the user interacting with the agent and the user action, if applicable.
      Applicable within an AIAgent execute function.

      :returns: A dict containing the email and name of the end user.
      :rtype: dict



   .. py:method:: get_runtime_config(key)

      Retrieve the value of a specified configuration key from the deployment's runtime settings.
      These settings can be configured in the deployment details page in the UI.
      Currently supported for AI Agents, Custom Python Model and Prediction Operators.

      :param key: The configuration key whose value is to be fetched.
      :type key: str

      :returns: The value associated with the specified configuration key, or None if the key does not exist.
      :rtype: str
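
      A minimal sketch, assumed to run inside an AI Agent, custom Python model, or prediction operator whose deployment defines the key; the key name is hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient()  # assumption: credentials come from the runtime environment

         raw_threshold = client.get_runtime_config('SCORE_THRESHOLD')  # hypothetical key
         # Values come back as strings (or None when the key is not configured).
         threshold = float(raw_threshold) if raw_threshold is not None else 0.5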



   .. py:method:: get_request_user_info()

      Gets the user information for the current request context.

      :returns: A dict containing the email and name of the end user.
      :rtype: dict



   .. py:method:: clear_agent_context()

      Clears the current request context.



   .. py:method:: execute_chatllm_computer_streaming(computer_id, prompt, is_transient = False)

      Executes a prompt on a remote computer and streams computer responses to the external chat UI in real time. Must be called from an agent execution context only.

      :param computer_id: The ID of the computer to use for the agent.
      :type computer_id: str
      :param prompt: The prompt to do tasks on the computer.
      :type prompt: str
      :param is_transient: If True, the message will be marked as transient and will not be persisted on reload in external chatllm UI. Transient messages are useful for streaming interim updates or results.
      :type is_transient: bool

      :returns: The text responses from the computer.
      :rtype: text (str)



   .. py:method:: streaming_evaluate_prompt(prompt = None, system_message = None, llm_name = None, max_tokens = None, temperature = 0.0, messages = None, response_type = None, json_response_schema = None, section_key = None)

      Generate response to the prompt using the specified model. This works the same as `evaluate_prompt` but would stream the text to the UI section while generating and returns the streamed text as an object of a `str` subclass.

      :param prompt: Prompt to use for generation.
      :type prompt: str
      :param system_message: System prompt for models that support it.
      :type system_message: str
      :param llm_name: Name of the underlying LLM to be used for generation. Default is auto selection.
      :type llm_name: LLMName
      :param max_tokens: Maximum number of tokens to generate. If set, the model will just stop generating after this token limit is reached.
      :type max_tokens: int
      :param temperature: Temperature to use for generation. A higher temperature produces less deterministic responses, while a value of zero produces mostly deterministic responses. Default is 0.0. A range of 0.0 - 2.0 is allowed.
      :type temperature: float
      :param messages: A list of messages to use as conversation history. For completion models like OPENAI_GPT3_5_TEXT and PALM_TEXT this should not be set. A message is a dict with attributes: is_user (bool): Whether the message is from the user. text (str): The message's text.
      :type messages: list
      :param response_type: Specifies the type of response to request from the LLM. One of 'text' and 'json'. If set to 'json', the LLM will respond with a json formatted string whose schema can be specified `json_response_schema`. Defaults to 'text'
      :type response_type: str
      :param json_response_schema: A dictionary specifying the keys/schema/parameters which the LLM should adhere to in its response when `response_type` is 'json'. Each parameter is mapped to a dict with the following info - type (str) (required): Data type of the parameter; description (str) (required): Description of the parameter; is_required (bool) (optional): Whether the parameter is required or not. Example: json_response_schema={'title': {'type': 'string', 'description': 'Article title', 'is_required': True}, 'body': {'type': 'string', 'description': 'Article body'}}
      :type json_response_schema: dict
      :param section_key: Key to identify output schema section.
      :type section_key: str

      :returns: The response from the model.
      :rtype: text (str)
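
      A minimal sketch, assumed to run inside an AI Agent execute function so the output can stream to the chat UI (the prompt and settings are illustrative):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient()  # assumption: credentials come from the runtime environment

         # Streams the generation to the UI section while returning the full text.
         answer = client.streaming_evaluate_prompt(
             prompt='Summarize the uploaded report in three bullet points.',
             system_message='You are a concise analyst.',
             max_tokens=256,
             temperature=0.0,
         )
         print(str(answer))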



   .. py:method:: execute_python(source_code)

      Executes the given source code.

      :param source_code: The source code to execute.
      :type source_code: str

      :returns: stdout, stderr, exception for source_code execution
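
      A minimal sketch (hedged; the API key is a placeholder):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # The returned value bundles stdout, stderr, and any exception raised by the snippet.
         result = client.execute_python("print('hello from execute_python')")
         print(result)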



   .. py:method:: _get_agent_app_request_id()

      Gets the request ID for the current request context of the async app. Applicable within an AIAgent execute function.

      :returns: The request ID for the current request being processed by the Agent.
      :rtype: str



   .. py:method:: _get_agent_caller()

      Gets the caller for the current request context. Applicable within an AIAgent execute function.

      :returns: The caller for the current request being processed by the Agent.
      :rtype: str



   .. py:method:: _is_proxy_app_caller()

      Checks if the caller is cluster-proxy app.

      :returns: True if the caller is cluster-proxy app.
      :rtype: bool



   .. py:method:: _is_async_app_caller()

      Checks if the caller is async app.

      :returns: True if the caller is async app.
      :rtype: bool



   .. py:method:: stream_message(message, is_transient = False)

      Streams a message to the current request context. Applicable within an AIAgent execute function.
      If the request is from the abacus.ai app, the response will be streamed to the UI; otherwise, it is logged as info when used from a notebook or Python script.

      :param message: The message to be streamed.
      :type message: str
      :param is_transient: If True, the message will be marked as transient and will not be persisted on reload in external chatllm UI. Transient messages are useful for streaming interim updates or results.
      :type is_transient: bool
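
      A minimal sketch, assumed to run inside an AI Agent execute function:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient()  # assumption: credentials come from the runtime environment

         # Interim updates can be marked transient so they disappear on reload.
         client.stream_message('Searching the document retriever...', is_transient=True)
         # ... perform the lookup ...
         client.stream_message('Done. Composing the final answer now.')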



   .. py:method:: stream_section_output(section_key, value)

      Streams the value corresponding to a particular section to the current request context. Applicable within an AIAgent execute function.
      If the request is from the abacus.ai app, the response will be streamed to the UI; otherwise, it is logged as info when used from a notebook or Python script.

      :param section_key: The section key to which the output corresponds.
      :type section_key: str
      :param value: The output contents.
      :type value: Any



   .. py:method:: stream_response_section(response_section)

      Streams a response section to the current request context. Applicable within an AIAgent execute function.
      If the request is from the abacus.ai app, the response will be streamed to the UI; otherwise, it is returned as part of the response when used from a notebook or Python script.

      :param response_section: The response section to be streamed.
      :type response_section: ResponseSection



   .. py:method:: _stream_llm_call(section_key=None, **kwargs)


   .. py:method:: _call_aiagent_app_send_message(request_id, caller, message=None, segment=None, llm_args=None, message_args=None, extra_args=None, proxy_caller=False)

      Calls the AI Agent app send message endpoint.

      :param request_id: The request ID for the current request being processed by the Agent.
      :type request_id: str
      :param caller: The caller for the current request being processed by the Agent.
      :type caller: str
      :param message: The message to send to the AsyncApp.
      :type message: str
      :param llm_args: The LLM arguments to send to the AsyncApp.
      :type llm_args: dict

      :returns: The response from the AsyncApp.
      :rtype: str



   .. py:method:: _status_poll(url, wait_states, method, body = {}, headers = None, delay = 1, timeout = 1200)


   .. py:method:: execute_data_query_using_llm(query, feature_group_ids, prompt_context = None, llm_name = None, temperature = None, preview = False, schema_document_retriever_ids = None, timeout=3600, delay=2, use_latest_version=True)

      Execute a data query using a large language model.

      :param query: The natural language query to execute. The query is converted to a SQL query using the language model.
      :type query: str
      :param feature_group_ids: A list of feature group IDs that the query should be executed against.
      :type feature_group_ids: List[str]
      :param prompt_context: The context message used to construct the prompt for the language model. If not provided, a default context message is used.
      :type prompt_context: str
      :param llm_name: The name of the language model to use. If not provided, the default language model is used.
      :type llm_name: str
      :param temperature: The temperature to use for the language model if supported. If not provided, the default temperature is used.
      :type temperature: float
      :param preview: If True, a preview of the query execution is returned.
      :type preview: bool
      :param schema_document_retriever_ids: A list of document retrievers to retrieve schema information for the data query. Otherwise, they are retrieved from the feature group metadata.
      :type schema_document_retriever_ids: List[str]
      :param timeout: Time limit for the call.
      :type timeout: int
      :param delay: Polling interval for checking timeout.
      :type delay: int
      :param use_latest_version: If enabled, executes the query on the latest version of the feature group; if no version exists, a FailedDependencyError is raised. If disabled, the query is executed against the latest feature group state, irrespective of the latest version of the feature group. Defaults to True.
      :type use_latest_version: bool

      :returns: The result of the query execution. Execution results could be loaded as pandas using 'load_as_pandas', i.e., result.execution.load_as_pandas().
      :rtype: LlmExecutionResult
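
      A minimal sketch (hedged; the feature group ID is hypothetical, and loading the results follows the `load_as_pandas` usage described above):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         result = client.execute_data_query_using_llm(
             query='Which ten products had the highest revenue last month?',
             feature_group_ids=['fg_sales_123'],  # hypothetical feature group ID
         )
         df = result.execution.load_as_pandas()  # load the execution results as a pandas DataFrame
         print(df.head())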



   .. py:method:: _get_doc_retriever_deployment_info(document_retriever_id)


   .. py:method:: get_matching_documents(document_retriever_id, query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None, filter_clause = None, crowding_limits = None, include_text_search = False)

      Lookup document retrievers and return the matching documents from the document retriever deployed with given query.

      Original documents are split into chunks and stored in the document retriever. This lookup function returns the relevant chunks
      from the document retriever. If permitted by the provided settings, the returned chunks may be expanded to include more words from the
      original documents and merged when they overlap. The returned chunks are sorted by relevance.


      :param document_retriever_id: A unique string identifier associated with the document retriever.
      :type document_retriever_id: str
      :param query: The query to search for.
      :type query: str
      :param filters: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filters: dict
      :param limit: If provided, will limit the number of results to the value specified.
      :type limit: int
      :param result_columns: If provided, will limit the column properties present in each result to those specified in this list.
      :type result_columns: list
      :param max_words: If provided, will limit the total number of words in the results to the value specified.
      :type max_words: int
      :param num_retrieval_margin_words: If provided, will add this number of words from left and right of the returned chunks.
      :type num_retrieval_margin_words: int
      :param max_words_per_chunk: If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual size of chunk on disk, which is determined during document retriever creation, the actual size of chunk will be used. I.e, chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
      :type max_words_per_chunk: int
      :param score_multiplier_column: If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
      :type score_multiplier_column: str
      :param min_score: If provided, will filter out the results with score lower than the value specified.
      :type min_score: float
      :param required_phrases: If provided, each result will have at least one of the phrases.
      :type required_phrases: list
      :param filter_clause: If provided, filter the results of the query using this sql where clause.
      :type filter_clause: str
      :param crowding_limits: A dictionary mapping metadata columns to the maximum number of results per unique value of the column. This is used to ensure diversity of metadata attribute values in the results. If a particular attribute value has already reached its maximum count, further results with that same attribute value will be excluded from the final result set.
      :type crowding_limits: dict
      :param include_text_search: If true, combine the ranking of results from a BM25 text search over the documents with the vector search using reciprocal rank fusion. It leverages both lexical and semantic matching for better overall results. It's particularly valuable in professional, technical, or specialized fields where both precision in terminology and understanding of context are important.
      :type include_text_search: bool

      :returns: The relevant documentation results found from the document retriever.
      :rtype: list[DocumentRetrieverLookupResult]
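
      A minimal sketch (hedged; the retriever ID and query are hypothetical):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         results = client.get_matching_documents(
             document_retriever_id='my_doc_retriever_id',  # hypothetical retriever ID
             query='How do I rotate my API keys?',
             limit=5,
             include_text_search=True,  # blend BM25 and vector rankings
         )
         for chunk in results:
             print(chunk)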



   .. py:method:: create_model_from_files(project_id, location, name = None, custom_artifact_filenames = {}, model_config = {})

      Creates a new Model and returns Upload IDs for uploading the model artifacts.

      Use this in supported use cases to provide a pre-trained model and supporting artifacts to be hosted on our platform.


      :param project_id: Unique string identifier associated with the project.
      :type project_id: str
      :param location: Cloud location for the model.
      :type location: str
      :param name: Name you want your model to have. Defaults to "<Project Name> Model".
      :type name: str
      :param custom_artifact_filenames: Optional mapping to specify which filename should be used for a given model artifact type.
      :type custom_artifact_filenames: dict
      :param model_config: Extra configurations that are specific to the model being created.
      :type model_config: dict

      :returns: The new model which is being trained.
      :rtype: Model



   .. py:method:: create_model_from_local_files(project_id, name = None, optional_artifacts = None, model_config = {})

      Creates a new Model and returns Upload IDs for uploading the model artifacts.

      Use this in supported use cases to provide a pre-trained model and supporting artifacts to be hosted on our platform.


      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param name: The name you want your model to have. Defaults to "<Project Name> Model".
      :type name: str
      :param optional_artifacts: A list of strings describing additional artifacts for the model. An example would be a verification file.
      :type optional_artifacts: list
      :param model_config: Extra configurations that are specific to the model being created.
      :type model_config: dict

      :returns: Collection of upload IDs to upload the model artifacts.
      :rtype: ModelUpload



   .. py:method:: create_model_version_from_files(model_id)

      Creates a new Model Version by re-importing from the paths specified when the model was created.

      :param model_id: Unique string identifier of the model to create a new version of with the new model artifacts.
      :type model_id: str

      :returns: The updated model.
      :rtype: ModelVersion



   .. py:method:: create_model_version_from_local_files(model_id, optional_artifacts = None)

      Creates a new Model Version and returns Upload IDs for uploading the associated model artifacts.

      :param model_id: Unique string identifier of the model to create a new version of with the new model artifacts.
      :type model_id: str
      :param optional_artifacts: List of strings describing additional artifacts for the model, e.g. a verification file.
      :type optional_artifacts: list

      :returns: Collection of upload IDs to upload the model artifacts.
      :rtype: ModelUpload



   .. py:method:: get_streaming_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False)

      Return an asynchronous generator which continues the conversation based on the input messages and search results.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param messages: A list of chronologically ordered messages, starting with a user message and alternating sources. A message is a dict with attributes: is_user (bool): Whether the message is from the user; text (str): The message's text.
      :type messages: list
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param ignore_documents: If True, will ignore any documents and search results, and only use the messages to generate a response.
      :type ignore_documents: bool
      :param include_search_results: If True, will also return search results, if relevant.
      :type include_search_results: bool



   .. py:method:: get_streaming_conversation_response(deployment_token, deployment_id, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False)

      Return an asynchronous generator which continues the conversation based on the input messages and search results.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param message: A message from the user
      :type message: str
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation ID.
      :type external_session_id: str
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param ignore_documents: If True, will ignore any documents and search results, and only use the messages to generate a response.
      :type ignore_documents: bool
      :param include_search_results: If True, will also return search results, if relevant.
      :type include_search_results: bool



   .. py:method:: execute_conversation_agent_streaming(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, regenerate = False, doc_infos = None, agent_workflow_node_id = None)

      Return an asynchronous generator which gives out the agent response stream.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param arguments: A list of arguments to pass to the agent.
      :type arguments: list
      :param keyword_arguments: A dictionary of keyword arguments to pass to the agent.
      :type keyword_arguments: dict
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation ID.
      :type external_session_id: str
      :param regenerate: If True, will regenerate the conversation from the start.
      :type regenerate: bool
      :param doc_infos: A list of dictionaries containing information about the documents uploaded with the request.
      :type doc_infos: list
      :param agent_workflow_node_id: The unique identifier of the agent workflow node to trigger. If not specified, the primary node will be used.
      :type agent_workflow_node_id: str



   .. py:method:: set_cache_scope(scope)

      Set the scope of the cache, for example, deployment id.

      :param scope: The scope to use for subsequent cache operations (for example, a deployment ID).
      :type scope: String

      :returns: None



   .. py:method:: clear_cache_scope()

      Clear the previously set scope and let the client automatically determine the scope to use. If no scope is found, the cache runs locally.



   .. py:method:: set_scoped_cache_value(key, value, expiration_time = 21600)

      Set the value for a key in the cache scope. The scope is determined automatically inside a deployment, or can be set with set_cache_scope.
      If no scope is found, the cache runs locally.

      :param key: The key of the cache entry.
      :type key: String
      :param value: The value of the cache entry. Only strings, integers, and floats are currently supported.
      :type value: String
      :param expiration_time: How long to keep the cache key before it expires, in seconds. Default is 6 hours.
      :type expiration_time: int

      :returns: None

      :raises InvalidParameterError: If key, value or expiration_time is invalid.



   .. py:method:: get_scoped_cache_value(key)

      Get the value of a key in the cache scope. The scope is determined automatically inside a deployment, or can be set with set_cache_scope.
      If no scope is found, the cache runs locally.

      :param key: The key of the cache entry.
      :type key: String

      :returns: The value of the key
      :rtype: value (String)

      :raises Generic404Error: if the key doesn't exist.



   .. py:method:: delete_scoped_cache_key(key)

      Delete a key in the cache scope. The scope is determined automatically inside a deployment, or can be set with set_cache_scope.
      If no scope is found, the cache runs locally.

      :param key: The key of the cache entry.
      :type key: String

      :returns: None
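
      A minimal sketch combining the scoped-cache helpers (hedged; the scope and key names are hypothetical):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         client.set_cache_scope('my_deployment_id')  # hypothetical scope
         client.set_scoped_cache_value('last_refresh', '2024-01-01', expiration_time=3600)
         print(client.get_scoped_cache_value('last_refresh'))  # raises Generic404Error if missing
         client.delete_scoped_cache_key('last_refresh')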



   .. py:method:: set_agent_response_document_sources(response_document_sources)

      Sets the document sources to be shown with the response on the conversation UI.

      :param response_document_sources: List of document retriever results to be displayed in order.
      :type response_document_sources: List

      :returns: None



   .. py:method:: get_initialized_data()

      Returns the object returned by the initialize_function during agent creation.

      :returns: Object returned by the initialize_function.



   .. py:method:: add_user_to_organization(email)

      Invite a user to your organization. This method will send the specified email address an invitation link to join your organization.

      :param email: The email address to invite to your organization.
      :type email: str



   .. py:method:: create_organization_group(group_name, permissions, default_group = False)

      Creates a new Organization Group.

      :param group_name: The name of the group.
      :type group_name: str
      :param permissions: The list of permissions to initialize the group with.
      :type permissions: list
      :param default_group: If True, this group will replace the current default group.
      :type default_group: bool

      :returns: Information about the created Organization Group.
      :rtype: OrganizationGroup



   .. py:method:: add_organization_group_permission(organization_group_id, permission)

      Adds a permission to the specified Organization Group.

      :param organization_group_id: Unique string identifier of the Organization Group.
      :type organization_group_id: str
      :param permission: Permission to add to the Organization Group.
      :type permission: str



   .. py:method:: remove_organization_group_permission(organization_group_id, permission)

      Removes a permission from the specified Organization Group.

      :param organization_group_id: Unique string identifier of the Organization Group.
      :type organization_group_id: str
      :param permission: The permission to remove from the Organization Group.
      :type permission: str



   .. py:method:: delete_organization_group(organization_group_id)

      Deletes the specified Organization Group

      :param organization_group_id: Unique string identifier of the organization group.
      :type organization_group_id: str



   .. py:method:: add_user_to_organization_group(organization_group_id, email)

      Adds a user to the specified Organization Group.

      :param organization_group_id: Unique string identifier of the Organization Group.
      :type organization_group_id: str
      :param email: Email of the user to be added to the group.
      :type email: str



   .. py:method:: remove_user_from_organization_group(organization_group_id, email)

      Removes a user from an Organization Group.

      :param organization_group_id: Unique string identifier of the Organization Group.
      :type organization_group_id: str
      :param email: Email of the user to remove.
      :type email: str



   .. py:method:: set_default_organization_group(organization_group_id)

      Sets the default Organization Group to which all new users joining an organization are automatically added.

      :param organization_group_id: Unique string identifier of the Organization Group.
      :type organization_group_id: str



   .. py:method:: delete_api_key(api_key_id)

      Delete a specified API key.

      :param api_key_id: The ID of the API key to delete.
      :type api_key_id: str



   .. py:method:: remove_user_from_organization(email)

      Removes the specified user from the organization. You can remove yourself; otherwise, you must be an organization administrator to use this method to remove other users from the organization.

      :param email: The email address of the user to remove from the organization.
      :type email: str



   .. py:method:: send_email(email, subject, body, is_html = False, attachments = None)

      Send an email to the specified email address with provided subject and contents.

      :param email: The email address to send the email to.
      :type email: str
      :param subject: The subject of the email.
      :type subject: str
      :param body: The body of the email.
      :type body: str
      :param is_html: Whether the body is html or not.
      :type is_html: bool
      :param attachments: A dictionary where the key is the filename (including the file extension), and the value is either a file-like object (e.g., an open file in binary mode) or raw file data (e.g., bytes).
      :type attachments: dict
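
      A minimal sketch (hedged; the recipient and attachment are hypothetical):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         with open('report.pdf', 'rb') as report:  # hypothetical attachment file
             client.send_email(
                 email='user@example.com',
                 subject='Weekly report',
                 body='<p>Please find this week&#39;s report attached.</p>',
                 is_html=True,
                 attachments={'report.pdf': report},
             )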



   .. py:method:: create_deployment_webhook(deployment_id, endpoint, webhook_event_type, payload_template = None)

      Create a webhook attached to a given deployment ID.

      :param deployment_id: Unique string identifier for the deployment this webhook will attach to.
      :type deployment_id: str
      :param endpoint: URI that the webhook will send HTTP POST requests to.
      :type endpoint: str
      :param webhook_event_type: One of 'DEPLOYMENT_START', 'DEPLOYMENT_SUCCESS', or 'DEPLOYMENT_FAILED'.
      :type webhook_event_type: str
      :param payload_template: Template for the body of the HTTP POST requests. Defaults to {}.
      :type payload_template: dict

      :returns: The webhook attached to the deployment.
      :rtype: Webhook
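
      A minimal sketch (hedged; the deployment ID and endpoint are hypothetical):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         webhook = client.create_deployment_webhook(
             deployment_id='my_deployment_id',  # hypothetical deployment ID
             endpoint='https://example.com/hooks/abacus',
             webhook_event_type='DEPLOYMENT_SUCCESS',
         )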



   .. py:method:: update_webhook(webhook_id, endpoint = None, webhook_event_type = None, payload_template = None)

      Update the webhook

      :param webhook_id: The ID of the webhook to be updated.
      :type webhook_id: str
      :param endpoint: If provided, changes the webhook's endpoint.
      :type endpoint: str
      :param webhook_event_type: If provided, changes the event type.
      :type webhook_event_type: str
      :param payload_template: If provided, changes the payload template.
      :type payload_template: dict



   .. py:method:: delete_webhook(webhook_id)

      Delete the webhook

      :param webhook_id: Unique identifier of the target webhook.
      :type webhook_id: str



   .. py:method:: create_project(name, use_case)

      Creates a project with the specified project name and use case. Creating a project creates a container for all datasets and models associated with a particular problem/project. For example, if you want to create a model to detect fraud, you need to first create a project, upload datasets, create feature groups, and then create one or more models to get predictions for your use case.

      :param name: The project's name.
      :type name: str
      :param use_case: The use case that the project solves. Refer to our [guide on use cases](https://api.abacus.ai/app/help/useCases) for further details of each use case. The following enums are currently available for you to choose from:  LANGUAGE_DETECTION,  NLP_SENTIMENT,  NLP_SEARCH,  NLP_CHAT,  CHAT_LLM,  NLP_SENTENCE_BOUNDARY_DETECTION,  NLP_CLASSIFICATION,  NLP_SUMMARIZATION,  NLP_DOCUMENT_VISUALIZATION,  AI_AGENT,  EMBEDDINGS_ONLY,  MODEL_WITH_EMBEDDINGS,  TORCH_MODEL,  TORCH_MODEL_WITH_EMBEDDINGS,  PYTHON_MODEL,  NOTEBOOK_PYTHON_MODEL,  DOCKER_MODEL,  DOCKER_MODEL_WITH_EMBEDDINGS,  CUSTOMER_CHURN,  ENERGY,  EVENT_ANOMALY_DETECTION,  FINANCIAL_METRICS,  CUMULATIVE_FORECASTING,  FRAUD_ACCOUNT,  FRAUD_TRANSACTIONS,  CLOUD_SPEND,  TIMESERIES_ANOMALY,  OPERATIONS_MAINTENANCE,  PERS_PROMOTIONS,  PREDICTING,  FEATURE_STORE,  RETAIL,  SALES_FORECASTING,  SALES_SCORING,  FEED_RECOMMEND,  USER_RANKINGS,  NAMED_ENTITY_RECOGNITION,  USER_RECOMMENDATIONS,  USER_RELATED,  VISION,  VISION_REGRESSION,  VISION_OBJECT_DETECTION,  FEATURE_DRIFT,  SCHEDULING,  GENERIC_FORECASTING,  PRETRAINED_IMAGE_TEXT_DESCRIPTION,  PRETRAINED_SPEECH_RECOGNITION,  PRETRAINED_STYLE_TRANSFER,  PRETRAINED_TEXT_TO_IMAGE_GENERATION,  PRETRAINED_OCR_DOCUMENT_TO_TEXT,  THEME_ANALYSIS,  CLUSTERING,  CLUSTERING_TIMESERIES,  FINETUNED_LLM,  PRETRAINED_INSTRUCT_PIX2PIX,  PRETRAINED_TEXT_CLASSIFICATION.
      :type use_case: str

      :returns: This object represents the newly created project.
      :rtype: Project
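
      A minimal sketch (hedged; the project name is arbitrary and the use case is one of the enums listed above):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         project = client.create_project(name='Churn Prediction', use_case='CUSTOMER_CHURN')
         print(project)  # the returned Project describes the newly created project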



   .. py:method:: rename_project(project_id, name)

      This method renames a project after it is created.

      :param project_id: The unique identifier for the project.
      :type project_id: str
      :param name: The new name for the project.
      :type name: str



   .. py:method:: delete_project(project_id, force_delete = False)

      Delete a specified project from your organization.

      This method deletes the project, its associated trained models, and deployments. The datasets attached to the specified project remain available for use with other projects in the organization.

      This method will not delete a project that contains active deployments. Ensure that all active deployments are stopped before using the delete option.

      Note: All projects, models, and deployments cannot be recovered once they are deleted.


      :param project_id: The unique ID of the project to delete.
      :type project_id: str
      :param force_delete: If True, the project will be deleted even if it has active deployments.
      :type force_delete: bool



   .. py:method:: add_project_tags(project_id, tags)

      This method adds a tag to a project.

      :param project_id: The unique identifier for the project.
      :type project_id: str
      :param tags: The tags to add to the project.
      :type tags: list



   .. py:method:: remove_project_tags(project_id, tags)

      This method removes a tag from a project.

      :param project_id: The unique identifier for the project.
      :type project_id: str
      :param tags: The tags to remove from the project.
      :type tags: list



   .. py:method:: add_feature_group_to_project(feature_group_id, project_id, feature_group_type = 'CUSTOM_TABLE')

      Adds a feature group to a project.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_type: The feature group type of the feature group, based on the use case under which the feature group is being created.
      :type feature_group_type: str



   .. py:method:: set_project_feature_group_config(feature_group_id, project_id, project_config = None)

      Sets a feature group's project config

      :param feature_group_id: Unique string identifier for the feature group.
      :type feature_group_id: str
      :param project_id: Unique string identifier for the project.
      :type project_id: str
      :param project_config: Feature group's project configuration.
      :type project_config: ProjectFeatureGroupConfig



   .. py:method:: remove_feature_group_from_project(feature_group_id, project_id)

      Removes a feature group from a project.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param project_id: The unique ID associated with the project.
      :type project_id: str



   .. py:method:: set_feature_group_type(feature_group_id, project_id, feature_group_type = 'CUSTOM_TABLE')

      Update the feature group type in a project. The feature group must already be added to the project.

      :param feature_group_id: Unique identifier associated with the feature group.
      :type feature_group_id: str
      :param project_id: Unique identifier associated with the project.
      :type project_id: str
      :param feature_group_type: The feature group type to set the feature group as.
      :type feature_group_type: str



   .. py:method:: set_feature_mapping(project_id, feature_group_id, feature_name, feature_mapping = None, nested_column_name = None)

      Set a column's feature mapping. If the column mapping is single-use and already set in another column in this feature group, this call will first remove the other column's mapping and move it to this column.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param feature_name: The name of the feature.
      :type feature_name: str
      :param feature_mapping: The mapping of the feature in the feature group.
      :type feature_mapping: str
      :param nested_column_name: The name of the nested column if the input feature is part of a nested feature group for the given feature_group_id.
      :type nested_column_name: str

      :returns: A list of objects that describes the resulting feature group's schema after the feature's featureMapping is set.
      :rtype: list[Feature]
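
      A minimal sketch (hedged; the IDs are hypothetical and 'TARGET' stands in for whatever feature mapping the project's use case expects):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         features = client.set_feature_mapping(
             project_id='my_project_id',
             feature_group_id='my_feature_group_id',
             feature_name='churned',
             feature_mapping='TARGET',  # hypothetical mapping for the use case
         )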



   .. py:method:: add_annotation(annotation, feature_group_id, feature_name, doc_id = None, feature_group_row_identifier = None, annotation_source = 'ui', status = None, comments = None, project_id = None, save_metadata = False, pages = None)

      Add an annotation entry to the database.

      :param annotation: The annotation to add. Format of the annotation is determined by its annotation type.
      :type annotation: dict
      :param feature_group_id: The ID of the feature group the annotation is on.
      :type feature_group_id: str
      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param doc_id: The ID of the primary document the annotation is on. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type doc_id: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the feature group's primary / identifier key value. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type feature_group_row_identifier: str
      :param annotation_source: Indicator of whether the annotation came from the UI, bulk upload, etc.
      :type annotation_source: str
      :param status: The status of the annotation. Can be one of 'todo', 'in_progress', 'done'. This is optional.
      :type status: str
      :param comments: Comments for the annotation. This is a dictionary of feature name to the corresponding comment. This is optional.
      :type comments: dict
      :param project_id: The ID of the project that the annotation is associated with. This is optional.
      :type project_id: str
      :param save_metadata: Whether to save the metadata for the annotation. This is optional.
      :type save_metadata: bool
      :param pages: pages (list): List of page numbers to consider while processing the annotation. This is optional. doc_id must be provided if pages is provided.
      :type pages: list

      :returns: The annotation entry that was added.
      :rtype: AnnotationEntry



   .. py:method:: describe_annotation(feature_group_id, feature_name = None, doc_id = None, feature_group_row_identifier = None)

      Get the latest annotation entry for a given feature group, feature, and document.

      :param feature_group_id: The ID of the feature group the annotation is on.
      :type feature_group_id: str
      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param doc_id: The ID of the primary document the annotation is on. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type doc_id: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the feature group's primary / identifier key value. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type feature_group_row_identifier: str

      :returns: The latest annotation entry for the given feature group, feature, document, and/or annotation key value.
      :rtype: AnnotationEntry



   .. py:method:: update_annotation_status(feature_group_id, feature_name, status, doc_id = None, feature_group_row_identifier = None, save_metadata = False)

      Update the status of an annotation entry.

      :param feature_group_id: The ID of the feature group the annotation is on.
      :type feature_group_id: str
      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param status: The new status of the annotation. Must be one of the following: 'TODO', 'IN_PROGRESS', 'DONE'.
      :type status: str
      :param doc_id: The ID of the primary document the annotation is on. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type doc_id: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the feature group's primary / identifier key value. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type feature_group_row_identifier: str
      :param save_metadata: If True, save the metadata for the annotation entry.
      :type save_metadata: bool

      :returns: The updated annotation entry.
      :rtype: AnnotationEntry



   .. py:method:: get_document_to_annotate(feature_group_id, project_id, feature_name, feature_group_row_identifier = None, get_previous = False)

      Get an available document that needs to be annotated for an annotation feature group.

      :param feature_group_id: The ID of the feature group the annotation is on.
      :type feature_group_id: str
      :param project_id: The ID of the project that the annotation is associated with.
      :type project_id: str
      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the primary key value. If provided, fetch the immediate next (or previous) available document.
      :type feature_group_row_identifier: str
      :param get_previous: If True, get the previous document instead of the next document. Applicable if feature_group_row_identifier is provided.
      :type get_previous: bool

      :returns: The document to annotate.
      :rtype: AnnotationDocument



   .. py:method:: import_annotation_labels(feature_group_id, file, annotation_type)

      Imports annotation labels from a CSV file. All valid values in the file will be imported as labels (including the header row, if present).

      :param feature_group_id: The unique string identifier of the feature group.
      :type feature_group_id: str
      :param file: The file to import. Must be a csv file.
      :type file: io.TextIOBase
      :param annotation_type: The type of the annotation.
      :type annotation_type: str

      :returns: The annotation config for the feature group.
      :rtype: AnnotationConfig



   .. py:method:: create_feature_group(table_name, sql, description = None, version_limit = 30)

      Creates a new FeatureGroup from a SQL statement.

      :param table_name: The unique name to be given to the FeatureGroup. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param sql: Input SQL statement for forming the FeatureGroup.
      :type sql: str
      :param description: The description about the FeatureGroup.
      :type description: str
      :param version_limit: The number of versions to preserve for the FeatureGroup (minimum 30).
      :type version_limit: int

      :returns: The created FeatureGroup.
      :rtype: FeatureGroup
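
      A minimal sketch of creating a SQL-defined feature group, assuming an authenticated ``ApiClient``; the table and column names are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         fg = client.create_feature_group(
             table_name='cleaned_user_events',                       # hypothetical table name
             sql='SELECT user_id, event_type, ts FROM user_events',  # hypothetical source table
             description='User events with only the columns needed downstream',
         )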



   .. py:method:: create_feature_group_from_template(table_name, feature_group_template_id, template_bindings = None, should_attach_feature_group_to_template = True, description = None, version_limit = 30)

      Creates a new feature group from a feature group template.

      :param table_name: The unique name to be given to the feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param feature_group_template_id: The unique ID associated with the template that will be used to create this feature group.
      :type feature_group_template_id: str
      :param template_bindings: Variable bindings that override the template's variable values.
      :type template_bindings: list
      :param should_attach_feature_group_to_template: Set to `False` to create a feature group but not leave it attached to the template that created it.
      :type should_attach_feature_group_to_template: bool
      :param description: A user-friendly description of this feature group.
      :type description: str
      :param version_limit: The number of versions to preserve for the feature group (minimum 30).
      :type version_limit: int

      :returns: The created feature group.
      :rtype: FeatureGroup



   .. py:method:: create_feature_group_from_function(table_name, function_source_code = None, function_name = None, input_feature_groups = None, description = None, cpu_size = None, memory = None, package_requirements = None, use_original_csv_names = False, python_function_name = None, python_function_bindings = None, use_gpu = None, version_limit = 30)

      Creates a new Feature Group from user-provided code. Currently, Python is the only supported code language.

      If a list of input feature groups is supplied, we will provide DataFrames (pandas, in the case of Python) with the materialized feature groups for those input feature groups as arguments to the function.

      This method expects the source code to be a valid language source file containing a function. This function needs to return a DataFrame when executed; this DataFrame will be used as the materialized version of this feature group table.


      :param table_name: The unique name to be given to the feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param function_source_code: Contents of a valid source code file in a supported Feature Group specification language (currently only Python). The source code should contain a function called function_name. A list of allowed import and system libraries for each language is specified in the user functions documentation section.
      :type function_source_code: str
      :param function_name: Name of the function found in the source code that will be executed (on the optional inputs) to materialize this feature group.
      :type function_name: str
      :param input_feature_groups: List of feature group names that are supplied to the function as parameters. Each of these parameters is a materialized DataFrame (the same type as the function's return value).
      :type input_feature_groups: list
      :param description: The description for this feature group.
      :type description: str
      :param cpu_size: Size of the CPU for the feature group function.
      :type cpu_size: CPUSize
      :param memory: Memory (in GB) for the feature group function.
      :type memory: MemorySize
      :param package_requirements: List of package requirements for the feature group function. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: list
      :param use_original_csv_names: Defaults to False, if set it uses the original column names for input feature groups from CSV datasets.
      :type use_original_csv_names: bool
      :param python_function_name: Name of Python Function that contains the source code and function arguments.
      :type python_function_name: str
      :param python_function_bindings: List of python function arguments.
      :type python_function_bindings: List
      :param use_gpu: Whether the feature group needs a GPU. Defaults to CPU otherwise.
      :type use_gpu: bool
      :param version_limit: The number of versions to preserve for the feature group (minimum 30).
      :type version_limit: int

      :returns: The created feature group
      :rtype: FeatureGroup
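
      A minimal sketch, assuming an authenticated ``ApiClient``; the function body, table names, and input feature group are hypothetical, but the contract follows the description above (the function must return a DataFrame, and each input feature group is passed in as a materialized DataFrame):

      .. code-block:: python

         import textwrap

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # Source code containing the function to execute for materialization.
         function_code = textwrap.dedent("""
             import pandas as pd

             def build_user_features(users_df):
                 out = users_df.copy()
                 out['signup_year'] = pd.to_datetime(out['signup_date']).dt.year
                 return out
         """)

         fg = client.create_feature_group_from_function(
             table_name='user_features',              # hypothetical table name
             function_source_code=function_code,
             function_name='build_user_features',
             input_feature_groups=['users'],          # hypothetical input feature group
             package_requirements=['pandas>=1.4.0'],
         )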



   .. py:method:: create_sampling_feature_group(feature_group_id, table_name, sampling_config, description = None)

      Creates a new Feature Group defined as a sample of rows from another Feature Group.

      For efficiency, sampling is approximate unless otherwise specified (e.g., the number of rows may vary slightly from what was requested).


      :param feature_group_id: The unique ID associated with the pre-existing Feature Group that will be sampled by this new Feature Group. i.e. the input for sampling.
      :type feature_group_id: str
      :param table_name: The unique name to be given to this sampling Feature Group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param sampling_config: Dictionary defining the sampling method and its parameters.
      :type sampling_config: SamplingConfig
      :param description: A human-readable description of this Feature Group.
      :type description: str

      :returns: The created Feature Group.
      :rtype: FeatureGroup
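
      A minimal sketch, assuming an authenticated ``ApiClient``. The ``sampling_config`` keys shown are illustrative placeholders only, not a confirmed schema; consult the ``SamplingConfig`` documentation for the exact fields:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         sampled_fg = client.create_sampling_feature_group(
             feature_group_id='fg_id',          # hypothetical source feature group ID
             table_name='user_events_sample',   # hypothetical table name
             # Illustrative config keys; see SamplingConfig for the real schema.
             sampling_config={'sampling_method': 'N_SAMPLING', 'sample_count': 10000},
             description='Approximate 10k-row sample of user events',
         )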



   .. py:method:: create_merge_feature_group(source_feature_group_id, table_name, merge_config, description = None)

      Creates a new feature group defined as the union of other feature group versions.

      :param source_feature_group_id: Unique string identifier corresponding to the dataset feature group that will have its versions merged into this feature group.
      :type source_feature_group_id: str
      :param table_name: Unique string identifier to be given to this merge feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param merge_config: JSON object defining the merging method and its parameters.
      :type merge_config: MergeConfig
      :param description: Human-readable description of this feature group.
      :type description: str

      :returns: The created feature group.
      :rtype: FeatureGroup



   .. py:method:: create_operator_feature_group(source_feature_group_id, table_name, operator_config, description = None)

      Creates a new Feature Group defined by a pre-defined operator applied to another Feature Group.

      :param source_feature_group_id: Unique string identifier corresponding to the Feature Group to which the operator will be applied.
      :type source_feature_group_id: str
      :param table_name: Unique string identifier for the operator Feature Group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param operator_config: The operator config is used to define the operator and its parameters.
      :type operator_config: OperatorConfig
      :param description: Human-readable description of the Feature Group.
      :type description: str

      :returns: The created Feature Group.
      :rtype: FeatureGroup



   .. py:method:: create_snapshot_feature_group(feature_group_version, table_name)

      Creates a Snapshot Feature Group corresponding to a specific Feature Group version.

      :param feature_group_version: Unique string identifier associated with the Feature Group version being snapshotted.
      :type feature_group_version: str
      :param table_name: Name for the newly created Snapshot Feature Group table. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str

      :returns: Feature Group corresponding to the newly created Snapshot.
      :rtype: FeatureGroup



   .. py:method:: create_online_feature_group(table_name, primary_key, description = None)

      Creates an Online Feature Group.

      :param table_name: Name for the newly created feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param primary_key: The primary key for indexing the online feature group.
      :type primary_key: str
      :param description: Human-readable description of the Feature Group.
      :type description: str

      :returns: The created online feature group.
      :rtype: FeatureGroup



   .. py:method:: set_feature_group_sampling_config(feature_group_id, sampling_config)

      Set a FeatureGroup’s sampling to the config values provided, so that the rows the FeatureGroup returns will be a sample of those it would otherwise have returned.

      :param feature_group_id: The unique identifier associated with the FeatureGroup.
      :type feature_group_id: str
      :param sampling_config: A JSON string object specifying the sampling method and parameters specific to that sampling method. An empty sampling_config indicates no sampling.
      :type sampling_config: SamplingConfig

      :returns: The updated FeatureGroup.
      :rtype: FeatureGroup



   .. py:method:: set_feature_group_merge_config(feature_group_id, merge_config)

      Set a MergeFeatureGroup’s merge config to the values provided, so that the feature group only returns a bounded range of an incremental dataset.

      :param feature_group_id: Unique identifier associated with the feature group.
      :type feature_group_id: str
      :param merge_config: JSON object string specifying the merge rule. An empty merge_config will default to only including the latest dataset version.
      :type merge_config: MergeConfig

      :returns: The updated FeatureGroup.
      :rtype: FeatureGroup



   .. py:method:: set_feature_group_operator_config(feature_group_id, operator_config)

      Set an OperatorFeatureGroup’s operator config to the values provided.

      :param feature_group_id: A unique string identifier associated with the feature group.
      :type feature_group_id: str
      :param operator_config: A dictionary object specifying the pre-defined operations.
      :type operator_config: OperatorConfig

      :returns: The updated FeatureGroup.
      :rtype: FeatureGroup



   .. py:method:: set_feature_group_schema(feature_group_id, schema)

      Creates a new schema and points the feature group to the new feature group schema ID.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str
      :param schema: JSON string containing an array of objects with 'name' and 'dataType' properties.
      :type schema: list



   .. py:method:: create_feature(feature_group_id, name, select_expression)

      Creates a new feature in a Feature Group from a SQL select statement.

      :param feature_group_id: The unique ID associated with the Feature Group.
      :type feature_group_id: str
      :param name: The name of the feature to add.
      :type name: str
      :param select_expression: SQL SELECT expression to create the feature.
      :type select_expression: str

      :returns: A Feature Group object with the newly added feature.
      :rtype: FeatureGroup
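
      A minimal sketch, assuming an authenticated ``ApiClient``; the feature group ID and source column names are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # Add a derived column defined by a SQL SELECT expression.
         fg = client.create_feature(
             feature_group_id='fg_id',   # hypothetical feature group ID
             name='full_name',
             select_expression="CONCAT(first_name, ' ', last_name)",  # hypothetical source columns
         )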



   .. py:method:: add_feature_group_tag(feature_group_id, tag)

      Adds a tag to the feature group

      :param feature_group_id: Unique identifier of the feature group.
      :type feature_group_id: str
      :param tag: The tag to add to the feature group.
      :type tag: str



   .. py:method:: remove_feature_group_tag(feature_group_id, tag)

      Removes a tag from the specified feature group.

      :param feature_group_id: Unique string identifier of the feature group.
      :type feature_group_id: str
      :param tag: The tag to remove from the feature group.
      :type tag: str



   .. py:method:: add_annotatable_feature(feature_group_id, name, annotation_type)

      Add an annotatable feature to a Feature Group.

      :param feature_group_id: The unique string identifier for the feature group.
      :type feature_group_id: str
      :param name: The name of the feature to add.
      :type name: str
      :param annotation_type: The type of annotation to set.
      :type annotation_type: str

      :returns: The feature group after the feature has been set
      :rtype: FeatureGroup



   .. py:method:: set_feature_as_annotatable_feature(feature_group_id, feature_name, annotation_type, feature_group_row_identifier_feature = None, doc_id_feature = None)

      Sets an existing feature as an annotatable feature (Feature that can be annotated).

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param feature_name: The name of the feature to set as annotatable.
      :type feature_name: str
      :param annotation_type: The type of annotation label to add.
      :type annotation_type: str
      :param feature_group_row_identifier_feature: The key value of the feature group row the annotation is on (cast to string) and uniquely identifies the feature group row. At least one of the doc_id or key value must be provided so that the correct annotation can be identified.
      :type feature_group_row_identifier_feature: str
      :param doc_id_feature: The name of the document ID feature.
      :type doc_id_feature: str

      :returns: A feature group object with the newly added annotatable feature.
      :rtype: FeatureGroup



   .. py:method:: set_annotation_status_feature(feature_group_id, feature_name)

      Sets a feature as the annotation status feature for a feature group.

      :param feature_group_id: The ID of the feature group.
      :type feature_group_id: str
      :param feature_name: The name of the feature to set as the annotation status feature.
      :type feature_name: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: unset_feature_as_annotatable_feature(feature_group_id, feature_name)

      Unsets a feature as annotatable

      :param feature_group_id: The unique string identifier of the feature group.
      :type feature_group_id: str
      :param feature_name: The name of the feature to unset.
      :type feature_name: str

      :returns: The feature group after unsetting the feature
      :rtype: FeatureGroup



   .. py:method:: add_feature_group_annotation_label(feature_group_id, label_name, annotation_type, label_definition = None)

      Adds an annotation label

      :param feature_group_id: The unique string identifier of the feature group.
      :type feature_group_id: str
      :param label_name: The name of the label.
      :type label_name: str
      :param annotation_type: The type of the annotation to set.
      :type annotation_type: str
      :param label_definition: the definition of the label.
      :type label_definition: str

      :returns: The feature group after adding the annotation label
      :rtype: FeatureGroup



   .. py:method:: remove_feature_group_annotation_label(feature_group_id, label_name)

      Removes an annotation label

      :param feature_group_id: The unique string identifier of the feature group.
      :type feature_group_id: str
      :param label_name: The name of the label to remove.
      :type label_name: str

      :returns: The feature group after removing the annotation label
      :rtype: FeatureGroup



   .. py:method:: add_feature_tag(feature_group_id, feature, tag)

      Adds a tag on a feature

      :param feature_group_id: The unique string identifier of the feature group.
      :type feature_group_id: str
      :param feature: The feature to set the tag on.
      :type feature: str
      :param tag: The tag to set on the feature.
      :type tag: str



   .. py:method:: remove_feature_tag(feature_group_id, feature, tag)

      Removes a tag from a feature

      :param feature_group_id: The unique string identifier of the feature group.
      :type feature_group_id: str
      :param feature: The feature to remove the tag from.
      :type feature: str
      :param tag: The tag to remove.
      :type tag: str



   .. py:method:: create_nested_feature(feature_group_id, nested_feature_name, table_name, using_clause, where_clause = None, order_clause = None)

      Creates a new nested feature in a feature group from a SQL statement.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param nested_feature_name: The name of the feature.
      :type nested_feature_name: str
      :param table_name: The table name of the feature group to nest. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param using_clause: The SQL join column or logic to join the nested table with the parent.
      :type using_clause: str
      :param where_clause: A SQL WHERE statement to filter the nested rows.
      :type where_clause: str
      :param order_clause: A SQL clause to order the nested rows.
      :type order_clause: str

      :returns: A feature group object with the newly added nested feature.
      :rtype: FeatureGroup
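
      A minimal sketch, assuming an authenticated ``ApiClient``; the table, join column, and filter below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # Nest recent completed orders under each row of the parent feature group.
         fg = client.create_nested_feature(
             feature_group_id='fg_id',        # hypothetical parent feature group ID
             nested_feature_name='orders',
             table_name='user_orders',        # hypothetical feature group table to nest
             using_clause='user_id',          # join column shared by both tables
             where_clause="status = 'COMPLETE'",
             order_clause='order_ts DESC',
         )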



   .. py:method:: update_nested_feature(feature_group_id, nested_feature_name, table_name = None, using_clause = None, where_clause = None, order_clause = None, new_nested_feature_name = None)

      Updates a previously existing nested feature in a feature group.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param nested_feature_name: The name of the feature to be updated.
      :type nested_feature_name: str
      :param table_name: The name of the table. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param using_clause: The SQL join column or logic to join the nested table with the parent.
      :type using_clause: str
      :param where_clause: An SQL WHERE statement to filter the nested rows.
      :type where_clause: str
      :param order_clause: An SQL clause to order the nested rows.
      :type order_clause: str
      :param new_nested_feature_name: New name for the nested feature.
      :type new_nested_feature_name: str

      :returns: A feature group object with the updated nested feature.
      :rtype: FeatureGroup



   .. py:method:: delete_nested_feature(feature_group_id, nested_feature_name)

      Delete a nested feature.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param nested_feature_name: The name of the feature to be deleted.
      :type nested_feature_name: str

      :returns: A feature group object without the specified nested feature.
      :rtype: FeatureGroup



   .. py:method:: create_point_in_time_feature(feature_group_id, feature_name, history_table_name, aggregation_keys, timestamp_key, historical_timestamp_key, expression, lookback_window_seconds = None, lookback_window_lag_seconds = 0, lookback_count = None, lookback_until_position = 0)

      Creates a new point in time feature in a feature group using another historical feature group, window spec, and aggregate expression.

      We use the aggregation keys and either the lookbackWindowSeconds or the lookbackCount values to perform the window aggregation for every row in the current feature group.

      If the window is specified in seconds, then all rows in the history table which match the aggregation keys and whose historicalTimeFeature is greater than or equal to lookbackStartCount and less than the value of the current row's timeFeature are considered. An optional lookbackWindowLagSeconds (positive or negative) can be used to offset the current value of the timeFeature. If this value is negative, we will look at the future rows in the history table, so care must be taken to ensure that these rows are available in the online context when we are performing a lookup on this feature group.

      If the window is specified in counts, then we order the historical table rows aligning by time and consider rows from the window where the rank order is greater than or equal to lookbackCount and includes the row just prior to the current one. The lag is specified in terms of positions using lookbackUntilPosition.


      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param feature_name: The name of the feature to create.
      :type feature_name: str
      :param history_table_name: The table name of the history table.
      :type history_table_name: str
      :param aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation.
      :type aggregation_keys: list
      :param timestamp_key: Name of feature which contains the timestamp value for the point in time feature.
      :type timestamp_key: str
      :param historical_timestamp_key: Name of feature which contains the historical timestamp.
      :type historical_timestamp_key: str
      :param expression: SQL aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str
      :param lookback_window_seconds: If window is specified in terms of time, number of seconds in the past from the current time for start of the window.
      :type lookback_window_seconds: float
      :param lookback_window_lag_seconds: Optional lag to offset the closest point for the window. If it is positive, we delay the start of window. If it is negative, we are looking at the "future" rows in the history table.
      :type lookback_window_lag_seconds: float
      :param lookback_count: If window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, we delay the start of window by that many rows. If it is negative, we are looking at those many "future" rows in the history table.
      :type lookback_until_position: int

      :returns: A feature group object with the newly added nested feature.
      :rtype: FeatureGroup
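
      A minimal sketch of a time-windowed aggregate, assuming an authenticated ``ApiClient``; the feature group ID, table, and column names are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # 30-day rolling spend per user, computed from a historical transactions table.
         fg = client.create_point_in_time_feature(
             feature_group_id='fg_id',            # hypothetical feature group ID
             feature_name='spend_last_30d',
             history_table_name='transactions',   # hypothetical history table
             aggregation_keys=['user_id'],
             timestamp_key='event_ts',
             historical_timestamp_key='txn_ts',
             expression='SUM(amount)',
             lookback_window_seconds=30 * 24 * 3600,  # 30 days, expressed in seconds
         )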



   .. py:method:: update_point_in_time_feature(feature_group_id, feature_name, history_table_name = None, aggregation_keys = None, timestamp_key = None, historical_timestamp_key = None, expression = None, lookback_window_seconds = None, lookback_window_lag_seconds = None, lookback_count = None, lookback_until_position = None, new_feature_name = None)

      Updates an existing Point-in-Time (PiT) feature in a feature group. See `createPointInTimeFeature` for detailed semantics.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param feature_name: The name of the feature.
      :type feature_name: str
      :param history_table_name: The table name of the history table. If not specified, we use the current table to do a self join.
      :type history_table_name: str
      :param aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation.
      :type aggregation_keys: list
      :param timestamp_key: Name of the feature which contains the timestamp value for the PiT feature.
      :type timestamp_key: str
      :param historical_timestamp_key: Name of the feature which contains the historical timestamp.
      :type historical_timestamp_key: str
      :param expression: SQL Aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str
      :param lookback_window_seconds: If the window is specified in terms of time, the number of seconds in the past from the current time for the start of the window.
      :type lookback_window_seconds: float
      :param lookback_window_lag_seconds: Optional lag to offset the closest point for the window. If it is positive, we delay the start of the window. If it is negative, we are looking at the "future" rows in the history table.
      :type lookback_window_lag_seconds: float
      :param lookback_count: If the window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, we delay the start of the window by that many rows. If it is negative, we are looking at those many "future" rows in the history table.
      :type lookback_until_position: int
      :param new_feature_name: New name for the PiT feature.
      :type new_feature_name: str

      :returns: A feature group object with the newly added nested feature.
      :rtype: FeatureGroup



   .. py:method:: create_point_in_time_group(feature_group_id, group_name, window_key, aggregation_keys, history_table_name = None, history_window_key = None, history_aggregation_keys = None, lookback_window = None, lookback_window_lag = 0, lookback_count = None, lookback_until_position = 0)

      Create a Point-in-Time Group

      :param feature_group_id: The unique ID associated with the feature group to add the point in time group to.
      :type feature_group_id: str
      :param group_name: The name of the point in time group.
      :type group_name: str
      :param window_key: Name of feature to use for ordering the rows on the source table.
      :type window_key: str
      :param aggregation_keys: List of keys to use for the window aggregation on the source table.
      :type aggregation_keys: list
      :param history_table_name: The table to use for aggregating, if not provided, the source table will be used.
      :type history_table_name: str
      :param history_window_key: Name of feature to use for ordering the rows on the history table. If not provided, the windowKey from the source table will be used.
      :type history_window_key: str
      :param history_aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation. If not provided, the aggregationKeys from the source table will be used. Must be the same length and order as the source table's aggregationKeys.
      :type history_aggregation_keys: list
      :param lookback_window: Number of seconds in the past from the current time for the start of the window. If 0, the lookback will include all rows.
      :type lookback_window: float
      :param lookback_window_lag: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed. If it is negative, "future" rows in the history table are used.
      :type lookback_window_lag: float
      :param lookback_count: If window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed by that many rows. If it is negative, those many "future" rows in the history table are used.
      :type lookback_until_position: int

      :returns: The feature group after the point in time group has been created.
      :rtype: FeatureGroup



   .. py:method:: generate_point_in_time_features(feature_group_id, group_name, columns, window_functions, prefix = None)

      Generates and adds PIT features given the selected columns to aggregate over, and the operations to include.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str
      :param group_name: Name of the point-in-time group.
      :type group_name: str
      :param columns: List of columns to generate point-in-time features for.
      :type columns: list
      :param window_functions: List of window functions to operate on.
      :type window_functions: list
      :param prefix: Prefix for generated features; defaults to the group name.
      :type prefix: str

      :returns: Feature group object with newly added point-in-time features.
      :rtype: FeatureGroup
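
      A minimal sketch, assuming an authenticated ``ApiClient``. The window function names shown are illustrative placeholders; use values supported by the platform:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         fg = client.generate_point_in_time_features(
             feature_group_id='fg_id',        # hypothetical feature group ID
             group_name='user_history',       # hypothetical point-in-time group name
             columns=['amount', 'quantity'],  # hypothetical columns to aggregate over
             window_functions=['SUM', 'AVG'], # illustrative window function names
             prefix='pit_',
         )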



   .. py:method:: update_point_in_time_group(feature_group_id, group_name, window_key = None, aggregation_keys = None, history_table_name = None, history_window_key = None, history_aggregation_keys = None, lookback_window = None, lookback_window_lag = None, lookback_count = None, lookback_until_position = None)

      Update Point-in-Time Group

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param group_name: The name of the point-in-time group.
      :type group_name: str
      :param window_key: Name of feature which contains the timestamp value for the point-in-time feature.
      :type window_key: str
      :param aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation.
      :type aggregation_keys: list
      :param history_table_name: The table to use for aggregating, if not provided, the source table will be used.
      :type history_table_name: str
      :param history_window_key: Name of feature to use for ordering the rows on the history table. If not provided, the windowKey from the source table will be used.
      :type history_window_key: str
      :param history_aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation. If not provided, the aggregationKeys from the source table will be used. Must be the same length and order as the source table's aggregationKeys.
      :type history_aggregation_keys: list
      :param lookback_window: Number of seconds in the past from the current time for the start of the window.
      :type lookback_window: float
      :param lookback_window_lag: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed. If it is negative, future rows in the history table are looked at.
      :type lookback_window_lag: float
      :param lookback_count: If window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed by that many rows. If it is negative, those many future rows in the history table are looked at.
      :type lookback_until_position: int

      :returns: The feature group after the update has been applied.
      :rtype: FeatureGroup



   .. py:method:: delete_point_in_time_group(feature_group_id, group_name)

      Delete point in time group

      :param feature_group_id: The unique identifier associated with the feature group.
      :type feature_group_id: str
      :param group_name: The name of the point in time group.
      :type group_name: str

      :returns: The feature group after the point in time group has been deleted.
      :rtype: FeatureGroup



   .. py:method:: create_point_in_time_group_feature(feature_group_id, group_name, name, expression)

      Create point in time group feature

      :param feature_group_id: A unique string identifier associated with the feature group.
      :type feature_group_id: str
      :param group_name: The name of the point-in-time group.
      :type group_name: str
      :param name: The name of the feature to add to the point-in-time group.
      :type name: str
      :param expression: A SQL aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str

      :returns: The feature group after the update has been applied.
      :rtype: FeatureGroup



   .. py:method:: update_point_in_time_group_feature(feature_group_id, group_name, name, expression)

      Update a feature's SQL expression in a point in time group

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param group_name: The name of the point-in-time group.
      :type group_name: str
      :param name: The name of the feature to add to the point-in-time group.
      :type name: str
      :param expression: SQL aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str

      :returns: The feature group after the update has been applied.
      :rtype: FeatureGroup



   .. py:method:: set_feature_type(feature_group_id, feature, feature_type, project_id = None)

      Set the type of a feature in a feature group. Specify the feature group ID, feature name, and feature type, and the method will return the new column with the changes reflected.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param feature: The name of the feature.
      :type feature: str
      :param feature_type: The machine learning type of the data in the feature.
      :type feature_type: str
      :param project_id: Optional unique ID associated with the project.
      :type project_id: str

      :returns: The feature group after the data_type is applied.
      :rtype: Schema
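
      A minimal sketch, assuming an authenticated ``ApiClient``. The feature type value shown is illustrative; use a feature type supported by the platform:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         schema = client.set_feature_type(
             feature_group_id='fg_id',     # hypothetical feature group ID
             feature='country_code',       # hypothetical feature name
             feature_type='CATEGORICAL',   # illustrative feature type value
         )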



   .. py:method:: concatenate_feature_group_data(feature_group_id, source_feature_group_id, merge_type = 'UNION', replace_until_timestamp = None, skip_materialize = False)

      Concatenates data from one Feature Group to another. Feature Groups can be merged if their schemas are compatible, they have the special `updateTimestampKey` column, and (if set) the `primaryKey` column. The second operand in the concatenate operation will be appended to the first operand (merge target).

      :param feature_group_id: The destination Feature Group.
      :type feature_group_id: str
      :param source_feature_group_id: The Feature Group to concatenate with the destination Feature Group.
      :type source_feature_group_id: str
      :param merge_type: `UNION` or `INTERSECTION`.
      :type merge_type: str
      :param replace_until_timestamp: The UNIX timestamp to specify the point until which we will replace data from the source Feature Group.
      :type replace_until_timestamp: int
      :param skip_materialize: If `True`, will not materialize the concatenated Feature Group.
      :type skip_materialize: bool
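
      A minimal sketch, assuming an authenticated ``ApiClient`` and two schema-compatible feature groups with hypothetical IDs:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         # Append the source feature group's rows onto the destination feature group.
         client.concatenate_feature_group_data(
             feature_group_id='fg_dest',        # hypothetical destination feature group ID
             source_feature_group_id='fg_src',  # hypothetical source feature group ID
             merge_type='UNION',                # or 'INTERSECTION'
             skip_materialize=True,             # defer materialization of the result
         )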



   .. py:method:: remove_concatenation_config(feature_group_id)

      Removes the concatenation config on a destination feature group.

      :param feature_group_id: Unique identifier of the destination feature group to remove the concatenation configuration from.
      :type feature_group_id: str



   .. py:method:: set_feature_group_indexing_config(feature_group_id, primary_key = None, update_timestamp_key = None, lookup_keys = None)

      Sets various attributes of the feature group used for primary key, deployment lookups and streaming updates.

      :param feature_group_id: Unique string identifier for the feature group.
      :type feature_group_id: str
      :param primary_key: Name of the feature which defines the primary key of the feature group.
      :type primary_key: str
      :param update_timestamp_key: Name of the feature which defines the update timestamp of the feature group. Used in concatenation and primary key deduplication.
      :type update_timestamp_key: str
      :param lookup_keys: List of feature names which can be used in the lookup API to restrict the computation to a set of dataset rows. These feature names have to correspond to underlying dataset columns.
      :type lookup_keys: list
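
      A minimal sketch, assuming an authenticated ``ApiClient``; the feature group ID and feature names are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         client.set_feature_group_indexing_config(
             feature_group_id='fg_id',           # hypothetical feature group ID
             primary_key='user_id',              # hypothetical primary key feature
             update_timestamp_key='updated_at',  # hypothetical update timestamp feature
             lookup_keys=['email'],              # hypothetical lookup key features
         )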



   .. py:method:: execute_async_feature_group_operation(query = None, fix_query_on_error = False, use_latest_version = True)

      Starts the execution of a feature group operation.

      :param query: The SQL to be executed.
      :type query: str
      :param fix_query_on_error: If enabled, SQL query is auto fixed if parsing fails.
      :type fix_query_on_error: bool
      :param use_latest_version: If enabled, executes the query on the latest version of the feature group; if that version doesn't exist, a FailedDependencyError is returned. If disabled, the query is executed against the latest feature group state, irrespective of the latest version of the feature group.
      :type use_latest_version: bool

      :returns: A dict that contains the execution status
      :rtype: ExecuteFeatureGroupOperation
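
      A minimal sketch, assuming an authenticated ``ApiClient``; the SQL and table name are hypothetical, and the run-ID attribute name used when polling is an assumption of this sketch:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         operation = client.execute_async_feature_group_operation(
             query='SELECT user_id, COUNT(*) AS n FROM user_events GROUP BY user_id',  # hypothetical table
         )

         # Poll for the status; the run-ID attribute name below is assumed.
         status = client.describe_async_feature_group_operation(
             operation.feature_group_operation_run_id
         )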



   .. py:method:: describe_async_feature_group_operation(feature_group_operation_run_id)

      Gets the status of the execution of a feature group operation.

      :param feature_group_operation_run_id: The unique ID associated with the execution.
      :type feature_group_operation_run_id: str

      :returns: A dict that contains the execution status
      :rtype: ExecuteFeatureGroupOperation



   .. py:method:: update_feature_group(feature_group_id, description = None)

      Modify an existing Feature Group.

      :param feature_group_id: Unique identifier associated with the Feature Group.
      :type feature_group_id: str
      :param description: Description of the Feature Group.
      :type description: str

      :returns: Updated Feature Group object.
      :rtype: FeatureGroup



   .. py:method:: detach_feature_group_from_template(feature_group_id)

      Update a feature group to detach it from a template.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_feature_group_template_bindings(feature_group_id, template_bindings = None)

      Update the feature group template bindings for a template feature group.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str
      :param template_bindings: Values in these bindings override values set in the template.
      :type template_bindings: list

      :returns: Updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_feature_group_python_function_bindings(feature_group_id, python_function_bindings)

      Updates an existing Feature Group's Python function bindings from a user-provided Python Function. If a list of feature groups is supplied within the Python function bindings, we will provide DataFrames (pandas, in the case of Python) with the materialized feature groups for those input feature groups as arguments to the function.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param python_function_bindings: List of python function arguments.
      :type python_function_bindings: List



   .. py:method:: update_feature_group_python_function(feature_group_id, python_function_name, python_function_bindings = None, cpu_size = None, memory = None, use_gpu = None, use_original_csv_names = None)

      Updates an existing Feature Group's Python function from a user-provided Python Function. If a list of feature groups is supplied within the Python function bindings, we will provide DataFrames (pandas, in the case of Python) with the materialized feature groups for those input feature groups as arguments to the function.


      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param python_function_name: The name of the python function to be associated with the feature group.
      :type python_function_name: str
      :param python_function_bindings: List of python function arguments.
      :type python_function_bindings: List
      :param cpu_size: Size of the CPU for the feature group python function.
      :type cpu_size: CPUSize
      :param memory: Memory (in GB) for the feature group python function.
      :type memory: MemorySize
      :param use_gpu: Whether the feature group needs a GPU. Defaults to CPU otherwise.
      :type use_gpu: bool
      :param use_original_csv_names: If enabled, it uses the original column names for input feature groups from CSV datasets.
      :type use_original_csv_names: bool



   .. py:method:: update_feature_group_sql_definition(feature_group_id, sql)

      Updates the SQL statement for a feature group.

      :param feature_group_id: The unique identifier associated with the feature group.
      :type feature_group_id: str
      :param sql: The input SQL statement for the feature group.
      :type sql: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_dataset_feature_group_feature_expression(feature_group_id, feature_expression)

      Updates the SQL feature expression for a Dataset FeatureGroup's custom features

      :param feature_group_id: The unique identifier associated with the feature group.
      :type feature_group_id: str
      :param feature_expression: The input SQL statement for the feature group.
      :type feature_expression: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_feature(feature_group_id, name, select_expression = None, new_name = None)

      Modifies an existing feature in a feature group.

      :param feature_group_id: Unique identifier of the feature group.
      :type feature_group_id: str
      :param name: Name of the feature to be updated.
      :type name: str
      :param select_expression: SQL statement for modifying the feature.
      :type select_expression: str
      :param new_name: New name of the feature.
      :type new_name: str

      :returns: Updated feature group object.
      :rtype: FeatureGroup



   .. py:method:: export_feature_group_version_to_file_connector(feature_group_version, location, export_file_format, overwrite = False)

      Export Feature group to File Connector.

      :param feature_group_version: Unique string identifier for the feature group instance to export.
      :type feature_group_version: str
      :param location: Cloud file location to export to.
      :type location: str
      :param export_file_format: Enum string specifying the file format to export to.
      :type export_file_format: str
      :param overwrite: If true and a file exists at this location, this process will overwrite the file.
      :type overwrite: bool

      :returns: The FeatureGroupExport instance.
      :rtype: FeatureGroupExport
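
      A minimal sketch, assuming an authenticated ``ApiClient`` and a cloud location already authorized through a file connector; the version ID, bucket path, and file format value are hypothetical/illustrative:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         export = client.export_feature_group_version_to_file_connector(
             feature_group_version='fg_version_id',      # hypothetical feature group version ID
             location='s3://my-bucket/exports/users/',   # hypothetical, pre-authorized location
             export_file_format='CSV',                   # illustrative format value
             overwrite=True,
         )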



   .. py:method:: export_feature_group_version_to_database_connector(feature_group_version, database_connector_id, object_name, write_mode, database_feature_mapping, id_column = None, additional_id_columns = None)

      Export Feature group to Database Connector.

      :param feature_group_version: Unique string identifier for the Feature Group instance to export.
      :type feature_group_version: str
      :param database_connector_id: Unique string identifier for the Database Connector to export to.
      :type database_connector_id: str
      :param object_name: Name of the database object to write to.
      :type object_name: str
      :param write_mode: Enum string indicating whether to use INSERT or UPSERT.
      :type write_mode: str
      :param database_feature_mapping: Key/value pair JSON object of "database connector column" -> "feature name" pairs.
      :type database_feature_mapping: dict
      :param id_column: Required if write_mode is UPSERT. Indicates which database column should be used as the lookup key.
      :type id_column: str
      :param additional_id_columns: For database connectors which support it, additional ID columns to use as a complex key for upserting.
      :type additional_id_columns: list

      :returns: The FeatureGroupExport instance.
      :rtype: FeatureGroupExport



   .. py:method:: export_feature_group_version_to_console(feature_group_version, export_file_format)

      Export Feature group to console.

      :param feature_group_version: Unique string identifier of the Feature Group instance to export.
      :type feature_group_version: str
      :param export_file_format: File format to export to.
      :type export_file_format: str

      :returns: The FeatureGroupExport instance.
      :rtype: FeatureGroupExport



   .. py:method:: set_feature_group_modifier_lock(feature_group_id, locked = True)

      Lock a feature group to prevent modification.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param locked: Whether to disable or enable feature group modification (True or False).
      :type locked: bool



   .. py:method:: add_user_to_feature_group_modifiers(feature_group_id, email)

      Adds a user to a feature group.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param email: The email address of the user to be added.
      :type email: str



   .. py:method:: add_organization_group_to_feature_group_modifiers(feature_group_id, organization_group_id)

      Add an OrganizationGroup to a feature group's modifiers list

      :param feature_group_id: Unique string identifier of the feature group.
      :type feature_group_id: str
      :param organization_group_id: Unique string identifier of the organization group.
      :type organization_group_id: str



   .. py:method:: remove_user_from_feature_group_modifiers(feature_group_id, email)

      Removes a user from a specified feature group.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param email: The email address of the user to be removed.
      :type email: str



   .. py:method:: remove_organization_group_from_feature_group_modifiers(feature_group_id, organization_group_id)

      Removes an OrganizationGroup from a feature group's modifiers list

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param organization_group_id: The unique ID associated with the organization group.
      :type organization_group_id: str



   .. py:method:: delete_feature(feature_group_id, name)

      Removes a feature from the feature group.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str
      :param name: Name of the feature to be deleted.
      :type name: str

      :returns: Updated feature group object.
      :rtype: FeatureGroup



   .. py:method:: delete_feature_group(feature_group_id)

      Deletes a Feature Group.

      :param feature_group_id: Unique string identifier for the feature group to be removed.
      :type feature_group_id: str



   .. py:method:: delete_feature_group_version(feature_group_version)

      Deletes a Feature Group Version.

      :param feature_group_version: String identifier for the feature group version to be removed.
      :type feature_group_version: str



   .. py:method:: create_feature_group_version(feature_group_id, variable_bindings = None)

      Creates a snapshot for a specified feature group. Triggers materialization of the feature group. The new version of the feature group is created after it has materialized.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str
      :param variable_bindings: Dictionary defining variable bindings that override parent feature group values.
      :type variable_bindings: dict

      :returns: A feature group version.
      :rtype: FeatureGroupVersion



   .. py:method:: set_feature_group_export_connector_config(feature_group_id, feature_group_export_config = None)

      Sets FG export config for the given feature group.

      :param feature_group_id: The unique ID associated with the pre-existing Feature Group for which export config is to be set.
      :type feature_group_id: str
      :param feature_group_export_config: The export config to be set for the given feature group.
      :type feature_group_export_config: FeatureGroupExportConfig



   .. py:method:: set_export_on_materialization(feature_group_id, enable)

      Can be used to enable or disable exporting feature group data to the export connector associated with the feature group.

      :param feature_group_id: The unique ID associated with the pre-existing Feature Group for which export config is to be set.
      :type feature_group_id: str
      :param enable: If true, will enable exporting feature group to the connector. If false, will disable.
      :type enable: bool



   .. py:method:: create_feature_group_template(feature_group_id, name, template_sql, template_variables, description = None, template_bindings = None, should_attach_feature_group_to_template = False)

      Create a feature group template.

      :param feature_group_id: Unique identifier of the feature group this template was created from.
      :type feature_group_id: str
      :param name: User-friendly name for this feature group template.
      :type name: str
      :param template_sql: The template SQL that will be resolved by applying values from the template variables to generate SQL for a feature group.
      :type template_sql: str
      :param template_variables: The template variables for resolving the template.
      :type template_variables: list
      :param description: Description of this feature group template.
      :type description: str
      :param template_bindings: If the feature group will be attached to the newly created template, set these variable bindings on that feature group.
      :type template_bindings: list
      :param should_attach_feature_group_to_template: Set to `True` to convert the feature group to a template feature group and attach it to the newly created template.
      :type should_attach_feature_group_to_template: bool

      :returns: The created feature group template.
      :rtype: FeatureGroupTemplate



   .. py:method:: delete_feature_group_template(feature_group_template_id)

      Delete an existing feature group template.

      :param feature_group_template_id: Unique string identifier associated with the feature group template.
      :type feature_group_template_id: str



   .. py:method:: update_feature_group_template(feature_group_template_id, template_sql = None, template_variables = None, description = None, name = None)

      Update a feature group template.

      :param feature_group_template_id: Unique identifier of the feature group template to update.
      :type feature_group_template_id: str
      :param template_sql: If provided, the new value to use for the template SQL.
      :type template_sql: str
      :param template_variables: If provided, the new value to use for the template variables.
      :type template_variables: list
      :param description: Description of this feature group template.
      :type description: str
      :param name: User-friendly name for this feature group template.
      :type name: str

      :returns: The updated feature group template.
      :rtype: FeatureGroupTemplate



   .. py:method:: preview_feature_group_template_resolution(feature_group_template_id = None, template_bindings = None, template_sql = None, template_variables = None, should_validate = True)

      Resolve template sql using template variables and template bindings.

      :param feature_group_template_id: Unique string identifier. If specified, use this template, otherwise assume an empty template.
      :type feature_group_template_id: str
      :param template_bindings: Values to override the template variable values specified by the template.
      :type template_bindings: list
      :param template_sql: If specified, use this as the template SQL instead of the feature group template's SQL.
      :type template_sql: str
      :param template_variables: Template variables to use. If a template is provided, this overrides the template's template variables.
      :type template_variables: list
      :param should_validate: If true, validates the resolved SQL.
      :type should_validate: bool

      :returns: The resolved template
      :rtype: ResolvedFeatureGroupTemplate



   .. py:method:: cancel_upload(upload_id)

      Cancels an upload.

      :param upload_id: A unique string identifier for the upload.
      :type upload_id: str



   .. py:method:: upload_part(upload_id, part_number, part_data)

      Uploads part of a large dataset file from your bucket to our system. Our system currently supports parts of up to 5GB and full files of up to 5TB. Note that each part must be at least 5MB in size, unless it is the last part in the sequence of parts for the full file.

      :param upload_id: A unique identifier for this upload.
      :type upload_id: str
      :param part_number: The 1-indexed number denoting the position of the file part in the sequence of parts for the full file.
      :type part_number: int
      :param part_data: The multipart/form-data for the current part of the full file.
      :type part_data: io.TextIOBase

      :returns: The object 'UploadPart' which encapsulates the hash and the etag for the part that got uploaded.
      :rtype: UploadPart



   .. py:method:: mark_upload_complete(upload_id)

      Marks an upload process as complete.

      :param upload_id: A unique string identifier for the upload process.
      :type upload_id: str

      :returns: The upload object associated with the process, containing details of the file.
      :rtype: Upload
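
      A minimal multipart-upload sketch, assuming an authenticated ``ApiClient`` and an ``upload_id`` obtained from whichever call started the upload; the local file name is hypothetical, and wrapping each raw chunk as a file-like object is an assumption of this sketch:

      .. code-block:: python

         import io

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         upload_id = 'upload_id'             # hypothetical; returned by the call that started the upload

         part_size = 5 * 1024 * 1024         # every part except the last must be at least 5MB
         with open('large_dataset.csv', 'rb') as source:  # hypothetical local file
             part_number = 1                 # parts are 1-indexed
             while True:
                 chunk = source.read(part_size)
                 if not chunk:
                     break
                 # Wrapping the chunk as a file-like object is an assumption of this sketch.
                 client.upload_part(upload_id, part_number, io.BytesIO(chunk))
                 part_number += 1

         upload = client.mark_upload_complete(upload_id)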



   .. py:method:: create_dataset_from_file_connector(table_name, location, file_format = None, refresh_schedule = None, csv_delimiter = None, filename_column = None, start_prefix = None, until_prefix = None, sql_query = None, location_date_format = None, date_format_lookback_days = None, incremental = False, is_documentset = False, extract_bounding_boxes = False, document_processing_config = None, merge_file_schemas = False, reference_only_documentset = False, parsing_config = None, version_limit = 30)

      Creates a dataset from a file located in a cloud storage, such as Amazon AWS S3, using the specified dataset name and location.

      :param table_name: Organization-unique table name or the name of the feature group table to create using the source table.
      :type table_name: str
      :param location: The URI location format of the dataset source. The URI location format needs to be specified to match the `location_date_format` when `location_date_format` is specified. For example, Location = s3://bucket1/dir1/dir2/event_date=YYYY-MM-DD/* when `location_date_format` is specified. The URI location format needs to include both the `start_prefix` and `until_prefix` when both are specified. For example, Location s3://bucket1/dir1/* includes both s3://bucket1/dir1/dir2/event_date=2021-08-02/* and s3://bucket1/dir1/dir2/event_date=2021-08-08/*
      :type location: str
      :param file_format: The file format of the dataset.
      :type file_format: str
      :param refresh_schedule: The Cron time string format that describes a schedule to retrieve the latest version of the imported dataset. The time is specified in UTC.
      :type refresh_schedule: str
      :param csv_delimiter: If the file format is CSV, use a specific csv delimiter.
      :type csv_delimiter: str
      :param filename_column: Adds a new column to the dataset with the external URI path.
      :type filename_column: str
      :param start_prefix: The start prefix (inclusive) for a range based search on a cloud storage location URI.
      :type start_prefix: str
      :param until_prefix: The end prefix (exclusive) for a range based search on a cloud storage location URI.
      :type until_prefix: str
      :param sql_query: The SQL query to use when fetching data from the specified location. Use `__TABLE__` as a placeholder for the table name. For example: "SELECT * FROM __TABLE__ WHERE event_date > '2021-01-01'". If not provided, the entire dataset from the specified location will be imported.
      :type sql_query: str
      :param location_date_format: The date format in which the data is partitioned in the cloud storage location. For example, if the data is partitioned as s3://bucket1/dir1/dir2/event_date=YYYY-MM-DD/dir4/filename.parquet, then the `location_date_format` is YYYY-MM-DD. This format needs to be consistent across all files within the specified location.
      :type location_date_format: str
      :param date_format_lookback_days: The number of days to look back from the current day for import locations that are date partitioned. For example, import date 2021-06-04 with `date_format_lookback_days` = 3 will retrieve data for all the dates in the range [2021-06-02, 2021-06-04].
      :type date_format_lookback_days: int
      :param incremental: Signifies if the dataset is an incremental dataset.
      :type incremental: bool
      :param is_documentset: Signifies if the dataset is a docstore dataset. A docstore dataset contains documents like images, PDFs, audio files etc. or is tabular data with links to such files.
      :type is_documentset: bool
      :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True.
      :type extract_bounding_boxes: bool
      :param document_processing_config: The document processing configuration. Only valid if is_documentset is True.
      :type document_processing_config: DatasetDocumentProcessingConfig
      :param merge_file_schemas: Signifies if the merge file schema policy is enabled. If is_documentset is True, this is also set to True by default.
      :type merge_file_schemas: bool
      :param reference_only_documentset: Signifies if the data reference only policy is enabled.
      :type reference_only_documentset: bool
      :param parsing_config: Custom config for dataset parsing.
      :type parsing_config: ParsingConfig
      :param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
      :type version_limit: int

      :returns: The dataset created.
      :rtype: Dataset
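
      A short, hedged example of importing a date-partitioned S3 location, assuming an authenticated
      client (`client`); the table name, bucket URI, and schedule below are placeholders.

      .. code-block:: python

         dataset = client.create_dataset_from_file_connector(
             table_name='event_logs',                                    # placeholder table name
             location='s3://my-bucket/events/event_date=YYYY-MM-DD/*',   # placeholder URI
             file_format='PARQUET',
             location_date_format='YYYY-MM-DD',
             date_format_lookback_days=3,    # import the three most recent daily partitions
             refresh_schedule='0 6 * * *',   # refresh daily at 06:00 UTC
         )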



   .. py:method:: create_dataset_version_from_file_connector(dataset_id, location = None, file_format = None, csv_delimiter = None, merge_file_schemas = None, parsing_config = None, sql_query = None)

      Creates a new version of the specified dataset.

      :param dataset_id: Unique string identifier associated with the dataset.
      :type dataset_id: str
      :param location: External URI to import the dataset from. If not specified, the last location will be used.
      :type location: str
      :param file_format: File format to be used. If not specified, the service will try to detect the file format.
      :type file_format: str
      :param csv_delimiter: If the file format is CSV, use a specific CSV delimiter.
      :type csv_delimiter: str
      :param merge_file_schemas: Signifies if the merge file schema policy is enabled.
      :type merge_file_schemas: bool
      :param parsing_config: Custom config for dataset parsing.
      :type parsing_config: ParsingConfig
      :param sql_query: The SQL query to use when fetching data from the specified location. Use `__TABLE__` as a placeholder for the table name. For example: "SELECT * FROM __TABLE__ WHERE event_date > '2021-01-01'". If not provided, the entire dataset from the specified location will be imported.
      :type sql_query: str

      :returns: The new Dataset Version created.
      :rtype: DatasetVersion



   .. py:method:: create_dataset_from_database_connector(table_name, database_connector_id, object_name = None, columns = None, query_arguments = None, refresh_schedule = None, sql_query = None, incremental = False, attachment_parsing_config = None, incremental_database_connector_config = None, document_processing_config = None, version_limit = 30)

      Creates a dataset from a Database Connector.

      :param table_name: Organization-unique table name.
      :type table_name: str
      :param database_connector_id: Unique String Identifier of the Database Connector to import the dataset from.
      :type database_connector_id: str
      :param object_name: If applicable, the name/ID of the object in the service to query.
      :type object_name: str
      :param columns: The columns to query from the external service object.
      :type columns: str
      :param query_arguments: Additional query arguments to filter the data.
      :type query_arguments: str
      :param refresh_schedule: The Cron time string format that describes a schedule to retrieve the latest version of the imported dataset. The time is specified in UTC.
      :type refresh_schedule: str
      :param sql_query: The full SQL query to use when fetching data. If present, this parameter will override `object_name`, `columns`, `timestamp_column`, and `query_arguments`.
      :type sql_query: str
      :param incremental: Signifies if the dataset is an incremental dataset.
      :type incremental: bool
      :param attachment_parsing_config: The attachment parsing configuration. Only valid when attachments are being imported; either a feature group name and column name, or a list of URLs to import, is expected (e.g. importing attachments via Salesforce).
      :type attachment_parsing_config: AttachmentParsingConfig
      :param incremental_database_connector_config: The config for incremental datasets. Only valid if incremental is True
      :type incremental_database_connector_config: IncrementalDatabaseConnectorConfig
      :param document_processing_config: The document processing configuration. Only valid when documents are being imported (e.g. importing KnowledgeArticleDescriptions via Salesforce).
      :type document_processing_config: DatasetDocumentProcessingConfig
      :param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
      :type version_limit: int

      :returns: The created dataset.
      :rtype: Dataset
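
      A hedged sketch of importing a filtered table through a database connector, assuming an
      authenticated client; the connector ID, table name, and query are placeholders.

      .. code-block:: python

         dataset = client.create_dataset_from_database_connector(
             table_name='crm_accounts',                          # placeholder table name
             database_connector_id='<database_connector_id>',    # placeholder connector ID
             sql_query="SELECT id, name, updated_at FROM accounts WHERE updated_at > '2024-01-01'",
             refresh_schedule='0 4 * * *',                       # refresh daily at 04:00 UTC
         )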



   .. py:method:: create_dataset_from_application_connector(table_name, application_connector_id, dataset_config = None, refresh_schedule = None, version_limit = 30)

      Creates a dataset from an Application Connector.

      :param table_name: Organization-unique table name.
      :type table_name: str
      :param application_connector_id: Unique string identifier of the application connector to download data from.
      :type application_connector_id: str
      :param dataset_config: Dataset config for the application connector.
      :type dataset_config: ApplicationConnectorDatasetConfig
      :param refresh_schedule: Cron time string format that describes a schedule to retrieve the latest version of the imported dataset. The time is specified in UTC.
      :type refresh_schedule: str
      :param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
      :type version_limit: int

      :returns: The created dataset.
      :rtype: Dataset



   .. py:method:: create_dataset_version_from_database_connector(dataset_id, object_name = None, columns = None, query_arguments = None, sql_query = None)

      Creates a new version of the specified dataset.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str
      :param object_name: The name/ID of the object in the service to query. If not specified, the last name will be used.
      :type object_name: str
      :param columns: The columns to query from the external service object. If not specified, the last columns will be used.
      :type columns: str
      :param query_arguments: Additional query arguments to filter the data. If not specified, the last arguments will be used.
      :type query_arguments: str
      :param sql_query: The full SQL query to use when fetching data. If present, this parameter will override object_name, columns, and query_arguments.
      :type sql_query: str

      :returns: The new Dataset Version created.
      :rtype: DatasetVersion



   .. py:method:: create_dataset_version_from_application_connector(dataset_id, dataset_config = None)

      Creates a new version of the specified dataset.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str
      :param dataset_config: Dataset config for the application connector. If any of the fields are not specified, the last values will be used.
      :type dataset_config: ApplicationConnectorDatasetConfig

      :returns: The new Dataset Version created.
      :rtype: DatasetVersion



   .. py:method:: create_dataset_from_upload(table_name, file_format = None, csv_delimiter = None, is_documentset = False, extract_bounding_boxes = False, parsing_config = None, merge_file_schemas = False, document_processing_config = None, version_limit = 30)

      Creates a dataset and returns an upload ID that can be used to upload a file.

      :param table_name: Organization-unique table name for this dataset.
      :type table_name: str
      :param file_format: The file format of the dataset.
      :type file_format: str
      :param csv_delimiter: If the file format is CSV, use a specific CSV delimiter.
      :type csv_delimiter: str
      :param is_documentset: Signifies if the dataset is a docstore dataset. A docstore dataset contains documents like images, PDFs, audio files etc. or is tabular data with links to such files.
      :type is_documentset: bool
      :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True.
      :type extract_bounding_boxes: bool
      :param parsing_config: Custom config for dataset parsing.
      :type parsing_config: ParsingConfig
      :param merge_file_schemas: Signifies whether to merge the schemas of all files in the dataset. If is_documentset is True, this is also set to True by default.
      :type merge_file_schemas: bool
      :param document_processing_config: The document processing configuration. Only valid if is_documentset is True.
      :type document_processing_config: DatasetDocumentProcessingConfig
      :param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
      :type version_limit: int

      :returns: A reference to be used when uploading file parts.
      :rtype: Upload
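
      An illustrative end-to-end sketch, assuming an authenticated client and that the returned
      `Upload` exposes its `upload_id`; the file is small enough here to send as a single part (see the
      chunked part-upload sketch earlier on this page for larger files).

      .. code-block:: python

         upload = client.create_dataset_from_upload(
             table_name='local_sales_data',   # placeholder table name
             file_format='CSV',
         )

         # Send the file contents as a single part, then finalize the upload.
         with open('sales.csv', 'rb') as source:
             client.upload_part(upload.upload_id, 1, source)
         client.mark_upload_complete(upload.upload_id)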



   .. py:method:: create_dataset_version_from_upload(dataset_id, file_format = None)

      Creates a new version of the specified dataset using a local file upload.

      :param dataset_id: Unique string identifier associated with the dataset.
      :type dataset_id: str
      :param file_format: File format to be used. If not specified, the service will attempt to detect the file format.
      :type file_format: str

      :returns: Token to be used when uploading file parts.
      :rtype: Upload



   .. py:method:: create_dataset_version_from_document_reprocessing(dataset_id, document_processing_config = None)

      Creates a new dataset version for a source docstore dataset with the provided document processing configuration. This does not re-import the data; it uses the data already imported in the latest dataset version and only performs document processing on it.

      :param dataset_id: The unique ID associated with the dataset to use as the source dataset.
      :type dataset_id: str
      :param document_processing_config: The document processing configuration to use for the new dataset version. If not specified, the document processing configuration from the source dataset will be used.
      :type document_processing_config: DatasetDocumentProcessingConfig

      :returns: The new dataset version created.
      :rtype: DatasetVersion



   .. py:method:: create_streaming_dataset(table_name, primary_key = None, update_timestamp_key = None, lookup_keys = None, version_limit = 30)

      Creates a streaming dataset. Use a streaming dataset if your dataset is receiving information from multiple sources over an extended period of time.

      :param table_name: The feature group table name to create for this dataset.
      :type table_name: str
      :param primary_key: The optional primary key column name for the dataset.
      :type primary_key: str
      :param update_timestamp_key: Name of the feature which defines the update timestamp of the feature group. Used in concatenation and primary key deduplication. Only relevant if lookup keys are set.
      :type update_timestamp_key: str
      :param lookup_keys: List of feature names which can be used in the lookup API to restrict the computation to a set of dataset rows. These feature names have to correspond to underlying dataset columns.
      :type lookup_keys: list
      :param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
      :type version_limit: int

      :returns: The streaming dataset created.
      :rtype: Dataset
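
      A minimal sketch, assuming an authenticated client; the table and column names are placeholders.

      .. code-block:: python

         streaming_dataset = client.create_streaming_dataset(
             table_name='clickstream_events',   # placeholder table name
             primary_key='event_id',
             update_timestamp_key='event_time',
             lookup_keys=['user_id'],
         )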



   .. py:method:: create_realtime_content_store(table_name, application_connector_id, dataset_config = None)

      Creates a real-time content store dataset.

      :param table_name: Organization-unique table name.
      :type table_name: str
      :param application_connector_id: Unique string identifier of the application connector to download data from.
      :type application_connector_id: str
      :param dataset_config: Dataset config for the application connector.
      :type dataset_config: ApplicationConnectorDatasetConfig

      :returns: The created dataset.
      :rtype: Dataset



   .. py:method:: snapshot_streaming_data(dataset_id)

      Snapshots the current data in the streaming dataset.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str

      :returns: The new Dataset Version created by taking a snapshot of the current data in the streaming dataset.
      :rtype: DatasetVersion



   .. py:method:: set_dataset_column_data_type(dataset_id, column, data_type)

      Set a Dataset's column type.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str
      :param column: The name of the column.
      :type column: str
      :param data_type: The type of the data in the column. Note: Some ColumnMappings may restrict the options or explicitly set the DataType.
      :type data_type: DataType

      :returns: The dataset and schema after the data type has been set.
      :rtype: Dataset
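
      A short sketch, assuming the `DataType` enum is importable from the top-level package; the
      dataset ID and column name are placeholders.

      .. code-block:: python

         from abacusai import DataType

         dataset = client.set_dataset_column_data_type(
             dataset_id='<dataset_id>',   # placeholder dataset ID
             column='zip_code',
             data_type=DataType.STRING,   # e.g. keep leading zeros instead of treating the column as numeric
         )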



   .. py:method:: create_dataset_from_streaming_connector(table_name, streaming_connector_id, dataset_config = None, refresh_schedule = None, version_limit = 30)

      Creates a dataset from a Streaming Connector

      :param table_name: Organization-unique table name
      :type table_name: str
      :param streaming_connector_id: Unique String Identifier for the Streaming Connector to import the dataset from
      :type streaming_connector_id: str
      :param dataset_config: Streaming dataset config
      :type dataset_config: StreamingConnectorDatasetConfig
      :param refresh_schedule: Cron time string format that describes a schedule to retrieve the latest version of the imported dataset. Time is specified in UTC.
      :type refresh_schedule: str
      :param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
      :type version_limit: int

      :returns: The created dataset.
      :rtype: Dataset



   .. py:method:: set_streaming_retention_policy(dataset_id, retention_hours = None, retention_row_count = None, ignore_records_before_timestamp = None)

      Sets the streaming retention policy.

      :param dataset_id: Unique string identifier for the streaming dataset.
      :type dataset_id: str
      :param retention_hours: Number of hours to retain streamed data in memory.
      :type retention_hours: int
      :param retention_row_count: Number of rows to retain streamed data in memory.
      :type retention_row_count: int
      :param ignore_records_before_timestamp: The Unix timestamp (in seconds) to use as a cutoff to ignore all entries sent before it
      :type ignore_records_before_timestamp: int



   .. py:method:: rename_database_connector(database_connector_id, name)

      Renames a Database Connector

      :param database_connector_id: The unique identifier for the database connector.
      :type database_connector_id: str
      :param name: The new name for the Database Connector.
      :type name: str



   .. py:method:: rename_application_connector(application_connector_id, name)

      Renames an Application Connector

      :param application_connector_id: The unique identifier for the application connector.
      :type application_connector_id: str
      :param name: A new name for the application connector.
      :type name: str



   .. py:method:: verify_database_connector(database_connector_id)

      Checks if Abacus.AI can access the specified database.

      :param database_connector_id: Unique string identifier for the database connector.
      :type database_connector_id: str



   .. py:method:: verify_file_connector(bucket)

      Checks to see if Abacus.AI can access the given bucket.

      :param bucket: The bucket to test.
      :type bucket: str

      :returns: The result of the verification.
      :rtype: FileConnectorVerification
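
      A minimal check, assuming an authenticated client; the bucket URI is a placeholder.

      .. code-block:: python

         verification = client.verify_file_connector('s3://my-bucket')   # placeholder bucket URI
         print(verification)   # inspect the returned FileConnectorVerification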



   .. py:method:: delete_database_connector(database_connector_id)

      Delete a database connector.

      :param database_connector_id: The unique identifier for the database connector.
      :type database_connector_id: str



   .. py:method:: delete_application_connector(application_connector_id)

      Delete an application connector.

      :param application_connector_id: The unique identifier for the application connector.
      :type application_connector_id: str



   .. py:method:: delete_file_connector(bucket)

      Deletes a file connector

      :param bucket: The fully qualified URI of the bucket to remove.
      :type bucket: str



   .. py:method:: verify_application_connector(application_connector_id)

      Checks if Abacus.AI can access the application using the provided application connector ID.

      :param application_connector_id: Unique string identifier for the application connector.
      :type application_connector_id: str



   .. py:method:: set_azure_blob_connection_string(bucket, connection_string)

      Authenticates the specified Azure Blob Storage bucket using an authenticated Connection String.

      :param bucket: The fully qualified Azure Blob Storage Bucket URI.
      :type bucket: str
      :param connection_string: The Connection String Abacus.AI should use to authenticate when accessing this bucket.
      :type connection_string: str

      :returns: An object with the roleArn and verification status for the specified bucket.
      :rtype: FileConnectorVerification



   .. py:method:: verify_streaming_connector(streaming_connector_id)

      Checks to see if Abacus.AI can access the streaming connector.

      :param streaming_connector_id: Unique string identifier for the streaming connector to be checked for Abacus.AI access.
      :type streaming_connector_id: str



   .. py:method:: rename_streaming_connector(streaming_connector_id, name)

      Renames a Streaming Connector

      :param streaming_connector_id: The unique identifier for the streaming connector.
      :type streaming_connector_id: str
      :param name: A new name for the streaming connector.
      :type name: str



   .. py:method:: delete_streaming_connector(streaming_connector_id)

      Delete a streaming connector.

      :param streaming_connector_id: The unique identifier for the streaming connector.
      :type streaming_connector_id: str



   .. py:method:: create_streaming_token()

      Creates a streaming token. Streaming tokens are used to authenticate requests when appending data to streaming datasets.

      :returns: The generated streaming token.
      :rtype: StreamingAuthToken
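
      A small sketch of the token lifecycle, assuming the returned `StreamingAuthToken` exposes the
      token string as `streaming_token`.

      .. code-block:: python

         token = client.create_streaming_token()

         # Use token.streaming_token to authenticate streaming append requests,
         # then revoke the token once it is no longer needed.
         client.delete_streaming_token(token.streaming_token)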



   .. py:method:: delete_streaming_token(streaming_token)

      Deletes the specified streaming token.

      :param streaming_token: The streaming token to delete.
      :type streaming_token: str



   .. py:method:: delete_dataset(dataset_id)

      Deletes the specified dataset from the organization.

      :param dataset_id: Unique string identifier of the dataset to delete.
      :type dataset_id: str



   .. py:method:: delete_dataset_version(dataset_version)

      Deletes the specified dataset version from the organization.

      :param dataset_version: String identifier of the dataset version to delete.
      :type dataset_version: str



   .. py:method:: get_docstore_page_data(doc_id, page, document_processing_config = None, document_processing_version = None)

      Returns the extracted page data for a document page.

      :param doc_id: A unique Docstore string identifier for the document.
      :type doc_id: str
      :param page: The page number to retrieve. Page numbers start from 0.
      :type page: int
      :param document_processing_config: The document processing configuration to use for returning the data when the document is processed via EXTRACT_DOCUMENT_DATA Feature Group Operator. If Feature Group Operator is not used, this parameter should be kept as None. If Feature Group Operator is used but this parameter is not provided, the latest available data or the default configuration will be used.
      :type document_processing_config: DocumentProcessingConfig
      :param document_processing_version: The document processing version to use for returning the data when the document is processed via EXTRACT_DOCUMENT_DATA Feature Group Operator. If Feature Group Operator is not used, this parameter should be kept as None. If Feature Group Operator is used but this parameter is not provided, the latest version will be used.
      :type document_processing_version: str

      :returns: The extracted page data.
      :rtype: PageData



   .. py:method:: get_docstore_document_data(doc_id, document_processing_config = None, document_processing_version = None, return_extracted_page_text = False)

      Returns the extracted data for a document.

      :param doc_id: A unique Docstore string identifier for the document.
      :type doc_id: str
      :param document_processing_config: The document processing configuration to use for returning the data when the document is processed via EXTRACT_DOCUMENT_DATA Feature Group Operator. If Feature Group Operator is not used, this parameter should be kept as None. If Feature Group Operator is used but this parameter is not provided, the latest available data or the default configuration will be used.
      :type document_processing_config: DocumentProcessingConfig
      :param document_processing_version: The document processing version to use for returning the data when the document is processed via EXTRACT_DOCUMENT_DATA Feature Group Operator. If Feature Group Operator is not used, this parameter should be kept as None. If Feature Group Operator is used but this parameter is not provided, the latest version will be used.
      :type document_processing_version: str
      :param return_extracted_page_text: Specifies whether to include a list of extracted text for each page in the response. Defaults to false if not provided.
      :type return_extracted_page_text: bool

      :returns: The extracted document data.
      :rtype: DocumentData
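
      A hedged example, assuming an authenticated client; the document identifier is a placeholder.

      .. code-block:: python

         doc_data = client.get_docstore_document_data(
             doc_id='<doc_id>',                  # placeholder Docstore identifier
             return_extracted_page_text=True,
         )

         # Page-level data can also be fetched individually; pages are 0-indexed.
         first_page = client.get_docstore_page_data('<doc_id>', page=0)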



   .. py:method:: extract_document_data(document = None, doc_id = None, document_processing_config = None, start_page = None, end_page = None, return_extracted_page_text = False)

      Extracts data from a document using either OCR (for scanned documents/images) or embedded text extraction (for digital documents like .docx). Configure the extraction method through DocumentProcessingConfig.

      :param document: The document to extract data from. One of document or doc_id must be provided.
      :type document: io.TextIOBase
      :param doc_id: A unique Docstore string identifier for the document. One of document or doc_id must be provided.
      :type doc_id: str
      :param document_processing_config: The document processing configuration.
      :type document_processing_config: DocumentProcessingConfig
      :param start_page: The starting page to extract data from. Pages are indexed starting from 0. If not provided, the first page will be used.
      :type start_page: int
      :param end_page: The last page to extract data from. Pages are indexed starting from 0. If not provided, the last page will be used.
      :type end_page: int
      :param return_extracted_page_text: Specifies whether to include a list of extracted text for each page in the response. Defaults to false if not provided.
      :type return_extracted_page_text: bool

      :returns: The extracted document data.
      :rtype: DocumentData
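
      A minimal sketch extracting the first two pages of a local PDF, assuming an authenticated client;
      the file name is a placeholder.

      .. code-block:: python

         with open('invoice.pdf', 'rb') as document:
             doc_data = client.extract_document_data(
                 document=document,
                 start_page=0,                      # pages are 0-indexed
                 end_page=1,                        # extract the first two pages
                 return_extracted_page_text=True,
             )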



   .. py:method:: get_training_config_options(project_id, feature_group_ids = None, for_retrain = False, current_training_config = None)

      Retrieves the full initial description of the model training configuration options available for the specified project. The configuration options available are determined by the use case associated with the specified project. Refer to the [Use Case Documentation]({USE_CASES_URL}) for more information on use cases and use case-specific configuration options.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_ids: The feature group IDs to be used for training.
      :type feature_group_ids: List
      :param for_retrain: Whether the training config options are used for retraining.
      :type for_retrain: bool
      :param current_training_config: The current state of the training config, with some options set, which shall be used to get new options after refresh. This is `None` by default initially.
      :type current_training_config: TrainingConfig

      :returns: An array of options that can be specified when training a model in this project.
      :rtype: list[TrainingConfigOptions]
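
      A minimal sketch, assuming an authenticated client; the project and feature group IDs are
      placeholders.

      .. code-block:: python

         options = client.get_training_config_options(
             project_id='<project_id>',                  # placeholder project ID
             feature_group_ids=['<feature_group_id>'],   # placeholder feature group ID
         )
         for option in options:
             print(option)   # inspect each configurable training option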



   .. py:method:: create_train_test_data_split_feature_group(project_id, training_config, feature_group_ids)

      Get the train and test data split without training the model. Only supported for models with custom algorithms.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param training_config: The training config used to influence how the split is calculated.
      :type training_config: TrainingConfig
      :param feature_group_ids: List of feature group IDs provided by the user, including the required one for data split and others to influence how to split.
      :type feature_group_ids: List

      :returns: The feature group containing the training data and folds information.
      :rtype: FeatureGroup



   .. py:method:: train_model(project_id, name = None, training_config = None, feature_group_ids = None, refresh_schedule = None, custom_algorithms = None, custom_algorithms_only = False, custom_algorithm_configs = None, builtin_algorithms = None, cpu_size = None, memory = None, algorithm_training_configs = None)

      Create a new model and start its training in the given project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param name: The name of the model. Defaults to "<Project Name> Model".
      :type name: str
      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig
      :param feature_group_ids: List of feature group IDs provided by the user to train the model on.
      :type feature_group_ids: List
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically retrain the created model.
      :type refresh_schedule: str
      :param custom_algorithms: List of user-defined algorithms to train. If not set, the default enabled custom algorithms will be used.
      :type custom_algorithms: list
      :param custom_algorithms_only: Whether to only run custom algorithms.
      :type custom_algorithms_only: bool
      :param custom_algorithm_configs: Configs for each user-defined algorithm; key is the algorithm name, value is the config serialized to JSON.
      :type custom_algorithm_configs: dict
      :param builtin_algorithms: List of algorithm names or algorithm IDs of the builtin algorithms provided by Abacus.AI to train. If not set, all applicable builtin algorithms will be used.
      :type builtin_algorithms: list
      :param cpu_size: Size of the CPU for the user-defined algorithms during training.
      :type cpu_size: str
      :param memory: Memory (in GB) for the user-defined algorithms during training.
      :type memory: int
      :param algorithm_training_configs: List of algorithm-specific training configs that will be part of the model training AutoML run.
      :type algorithm_training_configs: list

      :returns: The new model which is being trained.
      :rtype: Model
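
      A hedged example, assuming an authenticated client; the IDs, model name, and schedule are
      placeholders.

      .. code-block:: python

         model = client.train_model(
             project_id='<project_id>',                  # placeholder project ID
             name='Churn Model',
             feature_group_ids=['<feature_group_id>'],   # placeholder feature group ID
             refresh_schedule='0 2 * * 0',               # retrain weekly, Sundays at 02:00 UTC
         )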



   .. py:method:: create_model_from_python(project_id, function_source_code, train_function_name, training_input_tables, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, name = None, cpu_size = None, memory = None, training_config = None, exclusive_run = False, package_requirements = None, use_gpu = False, is_thread_safe = None)

      Initializes a new Model from user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the train and predict functions with the materialized feature groups for those input feature groups.

      This method expects `functionSourceCode` to be a valid Python source file which contains the functions named `trainFunctionName` and `predictFunctionName`. `trainFunctionName` returns the ModelVersion that results from training the model, while `predictFunctionName` has no well-defined return type, as it returns the prediction made by the model, which can be anything.


      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param function_source_code: Contents of a valid Python source code file. The source code should contain the functions named `trainFunctionName` and `predictFunctionName`. A list of allowed import and system libraries for each language is specified in the user functions documentation section.
      :type function_source_code: str
      :param train_function_name: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param training_input_tables: List of feature groups that are supplied to the train function as parameters. Each of the parameters is a materialized DataFrame (same type as the function's return value).
      :type training_input_tables: list
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the source code that will be executed for batch prediction of the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param initialize_function_name: Name of the function found in the source code to initialize the trained model before using it to make predictions using the model
      :type initialize_function_name: str
      :param name: The name you want your model to have. Defaults to "<Project Name> Model"
      :type name: str
      :param cpu_size: Size of the CPU for the model training function
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function
      :type memory: int
      :param training_config: Training configuration
      :type training_config: TrainingConfig
      :param exclusive_run: Decides if this model will be run exclusively or along with other Abacus.AI algorithms
      :type exclusive_run: bool
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: list
      :param use_gpu: Whether this model needs a GPU.
      :type use_gpu: bool
      :param is_thread_safe: Whether this model is thread safe
      :type is_thread_safe: bool

      :returns: The new model, which has not been trained.
      :rtype: Model
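
      An illustrative sketch only, assuming an authenticated client; the project ID and table name are
      placeholders, and the exact contract for the train and predict functions is described in the user
      functions documentation.

      .. code-block:: python

         function_source_code = '''
         def train(training_table):
             # training_table is the materialized DataFrame of the input feature group.
             # Return the object that the predict function should receive as the trained model.
             return {'means': training_table.mean(numeric_only=True).to_dict()}

         def predict(model, query):
             # model is the object returned by train(); query is the prediction request.
             return model['means']
         '''

         model = client.create_model_from_python(
             project_id='<project_id>',                      # placeholder project ID
             function_source_code=function_source_code,
             train_function_name='train',
             predict_function_name='predict',
             training_input_tables=['my_training_table'],    # placeholder feature group table name
             memory=16,
         )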



   .. py:method:: rename_model(model_id, name)

      Renames a model

      :param model_id: Unique identifier of the model to rename.
      :type model_id: str
      :param name: The new name to assign to the model.
      :type name: str



   .. py:method:: update_python_model(model_id, function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None, training_config = None)

      Updates an existing Python Model using user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the `train` and `predict` functions with the materialized feature groups for those input feature groups.

      This method expects `functionSourceCode` to be a valid Python source file which contains the functions named `trainFunctionName` and `predictFunctionName`. `trainFunctionName` returns the ModelVersion that results from training the model. `predictFunctionName` has no well-defined return type, as it returns the prediction made by the model, which can be anything.


      :param model_id: The unique ID associated with the Python model to be changed.
      :type model_id: str
      :param function_source_code: Contents of a valid Python source code file. The source code should contain the functions named `trainFunctionName` and `predictFunctionName`. A list of allowed import and system libraries for each language is specified in the user functions documentation section.
      :type function_source_code: str
      :param train_function_name: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the source code that will be executed to run batch predictions through the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param initialize_function_name: Name of the function found in the source code to initialize the trained model before using it to make predictions using the model.
      :type initialize_function_name: str
      :param training_input_tables: List of feature groups that are supplied to the `train` function as parameters. Each of the parameters is a materialized DataFrame (same type as the function's return value).
      :type training_input_tables: list
      :param cpu_size: Size of the CPU for the model training function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function.
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: `['numpy==1.2.3', 'pandas>=1.4.0']`.
      :type package_requirements: list
      :param use_gpu: Whether this model needs a GPU.
      :type use_gpu: bool
      :param is_thread_safe: Whether this model is thread safe
      :type is_thread_safe: bool
      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig

      :returns: The updated model.
      :rtype: Model



   .. py:method:: update_python_model_zip(model_id, train_function_name = None, predict_function_name = None, predict_many_function_name = None, train_module_name = None, predict_module_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None)

      Updates an existing Python Model using a provided zip file. If a list of input feature groups is supplied, they will be provided as arguments to the train and predict functions with the materialized feature groups for those input feature groups.

      This method expects `trainModuleName` and `predictModuleName` to be valid language source files which contain the functions named `trainFunctionName` and `predictFunctionName`, respectively. `trainFunctionName` returns the ModelVersion that results from training the model, and `predictFunctionName` has no well-defined return type, as it returns the prediction made by the model, which can be anything.


      :param model_id: The unique ID associated with the Python model to be changed.
      :type model_id: str
      :param train_function_name: Name of the function found in the train module that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the predict module that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the predict module that will be executed to run batch predictions through the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param train_module_name: Full path of the module that contains the train function from the root of the zip.
      :type train_module_name: str
      :param predict_module_name: Full path of the module that contains the predict function from the root of the zip.
      :type predict_module_name: str
      :param training_input_tables: List of feature groups that are supplied to the train function as parameters. Each of the parameters is a materialized DataFrame (same type as the function's return value).
      :type training_input_tables: list
      :param cpu_size: Size of the CPU for the model training function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function.
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param use_gpu: Whether this model needs a GPU.
      :type use_gpu: bool

      :returns: An upload reference to be used when uploading the new model artifacts zip file.
      :rtype: Upload



   .. py:method:: update_python_model_git(model_id, application_connector_id = None, branch_name = None, python_root = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, train_module_name = None, predict_module_name = None, training_input_tables = None, cpu_size = None, memory = None, use_gpu = None)

      Updates an existing Python model using an existing Git application connector. If a list of input feature groups is supplied, these will be provided as arguments to the train and predict functions with the materialized feature groups for those input feature groups.

      This method expects `trainModuleName` and `predictModuleName` to be valid language source files which contain the functions named `trainFunctionName` and `predictFunctionName`, respectively. `trainFunctionName` returns the `ModelVersion` that results from training the model, and `predictFunctionName` has no well-defined return type, as it returns the prediction made by the model, which can be anything.


      :param model_id: The unique ID associated with the Python model to be changed.
      :type model_id: str
      :param application_connector_id: The unique ID associated with the Git application connector.
      :type application_connector_id: str
      :param branch_name: Name of the branch in the Git repository to be used for training.
      :type branch_name: str
      :param python_root: Path from the top level of the Git repository to the directory containing the Python source code. If not provided, the default is the root of the Git repository.
      :type python_root: str
      :param train_function_name: Name of the function found in train module that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the predict module that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the predict module that will be executed to run batch predictions through the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param train_module_name: Full path of the module that contains the train function from the root of the zip.
      :type train_module_name: str
      :param predict_module_name: Full path of the module that contains the predict function from the root of the zip.
      :type predict_module_name: str
      :param training_input_tables: List of feature groups that are supplied to the train function as parameters. Each of the parameters is a materialized DataFrame (same type as the function's return value).
      :type training_input_tables: list
      :param cpu_size: Size of the CPU for the model training function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function.
      :type memory: int
      :param use_gpu: Whether this model needs a GPU.
      :type use_gpu: bool

      :returns: The updated model.
      :rtype: Model



   .. py:method:: set_model_training_config(model_id, training_config, feature_group_ids = None)

      Edits the default model training config

      :param model_id: A unique string identifier of the model to update.
      :type model_id: str
      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig
      :param feature_group_ids: The list of feature groups used as input to the model.
      :type feature_group_ids: List

      :returns: The model object corresponding to the updated training config.
      :rtype: Model



   .. py:method:: set_model_objective(model_version, metric = None)

      Sets the best model for all model instances of the model based on the specified metric, and updates the training configuration to use the specified metric for any future model versions.

      If metric is set to None, the default selection is used.


      :param model_version: The model version to set as the best model.
      :type model_version: str
      :param metric: The metric to use to determine the best model.
      :type metric: str



   .. py:method:: set_model_prediction_params(model_id, prediction_config)

      Sets the prediction config for the model.

      :param model_id: Unique string identifier of the model to update.
      :type model_id: str
      :param prediction_config: Prediction configuration for the model.
      :type prediction_config: dict

      :returns: Model object after the prediction configuration is applied.
      :rtype: Model



   .. py:method:: retrain_model(model_id, deployment_ids = None, feature_group_ids = None, custom_algorithms = None, builtin_algorithms = None, custom_algorithm_configs = None, cpu_size = None, memory = None, training_config = None, algorithm_training_configs = None)

      Retrains the specified model, with an option to choose the deployments to which the retrained model will be deployed.

      :param model_id: Unique string identifier of the model to retrain.
      :type model_id: str
      :param deployment_ids: List of unique string identifiers of deployments to automatically deploy to.
      :type deployment_ids: List
      :param feature_group_ids: List of feature group IDs provided by the user to train the model on.
      :type feature_group_ids: List
      :param custom_algorithms: List of user-defined algorithms to train. If not set, the algorithms from the previous run will be used, along with any applicable new custom algorithms.
      :type custom_algorithms: list
      :param builtin_algorithms: List of algorithm names or algorithm IDs of Abacus.AI built-in algorithms to train. If not set, the algorithms from the previous run will be used, along with any applicable new built-in algorithms.
      :type builtin_algorithms: list
      :param custom_algorithm_configs: User-defined training configs for each custom algorithm.
      :type custom_algorithm_configs: dict
      :param cpu_size: Size of the CPU for the user-defined algorithms during training.
      :type cpu_size: str
      :param memory: Memory (in GB) for the user-defined algorithms during training.
      :type memory: int
      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig
      :param algorithm_training_configs: List of algorithm-specific training configs that will be part of the model training AutoML run.
      :type algorithm_training_configs: list

      :returns: The model that is being retrained.
      :rtype: Model
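
      A minimal sketch, assuming an authenticated client; the model and deployment IDs are placeholders.

      .. code-block:: python

         model = client.retrain_model(
             model_id='<model_id>',                # placeholder model ID
             deployment_ids=['<deployment_id>'],   # auto-deploy the retrained model (placeholder ID)
         )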



   .. py:method:: delete_model(model_id)

      Deletes the specified model and all its versions. Models which are currently used in deployments cannot be deleted.

      :param model_id: Unique string identifier of the model to delete.
      :type model_id: str



   .. py:method:: delete_model_version(model_version)

      Deletes the specified model version. Model versions which are currently used in deployments cannot be deleted.

      :param model_version: The unique identifier of the model version to delete.
      :type model_version: str



   .. py:method:: export_model_artifact_as_feature_group(model_version, table_name, artifact_type = None)

      Exports metric artifact data for a model as a feature group.

      :param model_version: Unique string identifier for the version of the model.
      :type model_version: str
      :param table_name: Name of the feature group table to create.
      :type table_name: str
      :param artifact_type: eval artifact type to export.
      :type artifact_type: EvalArtifactType

      :returns: The created feature group.
      :rtype: FeatureGroup



   .. py:method:: set_default_model_algorithm(model_id, algorithm = None, data_cluster_type = None)

      Sets the model's algorithm to default for all new deployments

      :param model_id: Unique identifier of the model to set.
      :type model_id: str
      :param algorithm: Algorithm to pin in the model.
      :type algorithm: str
      :param data_cluster_type: Data cluster type to set the lead model for.
      :type data_cluster_type: str



   .. py:method:: get_custom_train_function_info(project_id, feature_group_names_for_training = None, training_data_parameter_name_override = None, training_config = None, custom_algorithm_config = None)

      Returns information about how to call the custom train function.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_names_for_training: A list of feature group table names to be used for training.
      :type feature_group_names_for_training: list
      :param training_data_parameter_name_override: Override from feature group type to parameter name in the train function.
      :type training_data_parameter_name_override: dict
      :param training_config: Training config for the options supported by the Abacus.AI platform.
      :type training_config: TrainingConfig
      :param custom_algorithm_config: User-defined config that can be serialized by JSON.
      :type custom_algorithm_config: dict

      :returns: Information about how to call the customer-provided train function.
      :rtype: CustomTrainFunctionInfo



   .. py:method:: export_custom_model_version(model_version, output_location, algorithm = None)

      Bundles custom model artifacts into a zip file and exports it to the specified location.

      :param model_version: A unique string identifier for the model version.
      :type model_version: str
      :param output_location: Location to which the model artifact results are exported. For example, s3://a-bucket/
      :type output_location: str
      :param algorithm: The algorithm to be exported. Optional if there's only one custom algorithm in the model version.
      :type algorithm: str

      :returns: Object describing the export and its status.
      :rtype: ModelArtifactsExport



   .. py:method:: create_model_monitor(project_id, prediction_feature_group_id, training_feature_group_id = None, name = None, refresh_schedule = None, target_value = None, target_value_bias = None, target_value_performance = None, feature_mappings = None, model_id = None, training_feature_mappings = None, feature_group_base_monitor_config = None, feature_group_comparison_monitor_config = None, exclude_interactive_performance_analysis = True, exclude_bias_analysis = None, exclude_performance_analysis = None, exclude_feature_drift_analysis = None, exclude_data_integrity_analysis = None)

      Runs a model monitor for the specified project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param prediction_feature_group_id: The unique ID of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: The unique ID of the training data feature group.
      :type training_feature_group_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically retrain the created model monitor.
      :type refresh_schedule: str
      :param target_value: A target positive value for the label to compute bias and PR/AUC for performance page.
      :type target_value: str
      :param target_value_bias: A target positive value for the label to compute bias.
      :type target_value_bias: str
      :param target_value_performance: A target positive value for the label to compute PR curve/AUC for performance page.
      :type target_value_performance: str
      :param feature_mappings: A JSON map to override features for prediction_feature_group, where keys are column names and the values are feature data use types.
      :type feature_mappings: dict
      :param model_id: The unique ID of the model.
      :type model_id: str
      :param training_feature_mappings: A JSON map to override features for training_feature_group, where keys are column names and the values are feature data use types.
      :type training_feature_mappings: dict
      :param feature_group_base_monitor_config: Selection strategy for the base feature group, with the feature group version if selected.
      :type feature_group_base_monitor_config: dict
      :param feature_group_comparison_monitor_config: Selection strategy for the comparison feature group, with the feature group version if selected.
      :type feature_group_comparison_monitor_config: dict
      :param exclude_interactive_performance_analysis: Whether to exclude interactive performance analysis. Defaults to True if not provided.
      :type exclude_interactive_performance_analysis: bool
      :param exclude_bias_analysis: Whether to exclude bias analysis in the model monitor. By default, bias analysis is included.
      :type exclude_bias_analysis: bool
      :param exclude_performance_analysis: Whether to exclude performance analysis in the model monitor. By default, performance analysis is included.
      :type exclude_performance_analysis: bool
      :param exclude_feature_drift_analysis: Whether to exclude feature drift analysis in the model monitor. By default, feature drift analysis is included.
      :type exclude_feature_drift_analysis: bool
      :param exclude_data_integrity_analysis: Whether to exclude data integrity analysis in the model monitor. By default, data integrity analysis is included.
      :type exclude_data_integrity_analysis: bool

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor
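
      A hedged example, assuming an authenticated client; all IDs, the monitor name, and the schedule
      are placeholders.

      .. code-block:: python

         monitor = client.create_model_monitor(
             project_id='<project_id>',                                      # placeholder project ID
             prediction_feature_group_id='<prediction_feature_group_id>',
             training_feature_group_id='<training_feature_group_id>',
             name='Churn Model Monitor',
             refresh_schedule='0 5 * * 1',                                   # re-run weekly, Mondays at 05:00 UTC
         )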



   .. py:method:: rerun_model_monitor(model_monitor_id)

      Re-runs the specified model monitor.

      :param model_monitor_id: Unique string identifier of the model monitor to re-run.
      :type model_monitor_id: str

      :returns: The model monitor that is being re-run.
      :rtype: ModelMonitor



   .. py:method:: rename_model_monitor(model_monitor_id, name)

      Renames a model monitor

      :param model_monitor_id: Unique identifier of the model monitor to rename.
      :type model_monitor_id: str
      :param name: The new name to apply to the model monitor.
      :type name: str



   .. py:method:: delete_model_monitor(model_monitor_id)

      Deletes the specified Model Monitor and all its versions.

      :param model_monitor_id: Unique identifier of the Model Monitor to delete.
      :type model_monitor_id: str



   .. py:method:: delete_model_monitor_version(model_monitor_version)

      Deletes the specified model monitor version.

      :param model_monitor_version: Unique identifier of the model monitor version to delete.
      :type model_monitor_version: str



   .. py:method:: create_vision_drift_monitor(project_id, prediction_feature_group_id, training_feature_group_id, name, feature_mappings, training_feature_mappings, target_value_performance = None, refresh_schedule = None)

      Runs a vision drift monitor for the specified project.

      :param project_id: Unique string identifier of the project.
      :type project_id: str
      :param prediction_feature_group_id: Unique string identifier of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: Unique string identifier of the training data feature group.
      :type training_feature_group_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param feature_mappings: A JSON map to override features for prediction_feature_group, where keys are column names and the values are feature data use types.
      :type feature_mappings: dict
      :param training_feature_mappings: A JSON map to override features for training_feature_group, where keys are column names and the values are feature data use types.
      :type training_feature_mappings: dict
      :param target_value_performance: A target positive value for the label to compute precision-recall curve/area under curve for performance page.
      :type target_value_performance: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically rerun the created vision drift monitor.
      :type refresh_schedule: str

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor



   .. py:method:: create_nlp_drift_monitor(project_id, prediction_feature_group_id, training_feature_group_id, name, feature_mappings, training_feature_mappings, target_value_performance = None, refresh_schedule = None)

      Runs an NLP drift monitor for the specified project.

      :param project_id: Unique string identifier of the project.
      :type project_id: str
      :param prediction_feature_group_id: Unique string identifier of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: Unique string identifier of the training data feature group.
      :type training_feature_group_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param feature_mappings: A JSON map to override features for prediction_feature_group, where keys are column names and the values are feature data use types.
      :type feature_mappings: dict
      :param training_feature_mappings: A JSON map to override features for training_feature_group, where keys are column names and the values are feature data use types.
      :type training_feature_mappings: dict
      :param target_value_performance: A target positive value for the label to compute precision-recall curve/area under curve for performance page.
      :type target_value_performance: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically rerun the created nlp drift monitor.
      :type refresh_schedule: str

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor



   .. py:method:: create_forecasting_monitor(project_id, name, prediction_feature_group_id, training_feature_group_id, training_forecast_config, prediction_forecast_config, forecast_frequency, refresh_schedule = None)

      Runs a forecasting monitor for the specified project.

      :param project_id: Unique string identifier of the project.
      :type project_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param prediction_feature_group_id: Unique string identifier of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: Unique string identifier of the training data feature group.
      :type training_feature_group_id: str
      :param training_forecast_config: The configuration for the training data.
      :type training_forecast_config: ForecastingMonitorConfig
      :param prediction_forecast_config: The configuration for the prediction data.
      :type prediction_forecast_config: ForecastingMonitorConfig
      :param forecast_frequency: The frequency of the forecast. Defaults to the frequency of the prediction data.
      :type forecast_frequency: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically rerun the created forecasting monitor.
      :type refresh_schedule: str

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor



   .. py:method:: create_eda(project_id, feature_group_id, name, refresh_schedule = None, include_collinearity = False, include_data_consistency = False, collinearity_keys = None, primary_keys = None, data_consistency_test_config = None, data_consistency_reference_config = None, feature_mappings = None, forecast_frequency = None)

      Runs an Exploratory Data Analysis (EDA) for the specified project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_id: The unique ID of the prediction data feature group.
      :type feature_group_id: str
      :param name: The name you want your EDA to have. Defaults to "<Project Name> EDA".
      :type name: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically retrain the created EDA.
      :type refresh_schedule: str
      :param include_collinearity: Set to True if the EDA type is collinearity.
      :type include_collinearity: bool
      :param include_data_consistency: Set to True if the EDA type is data consistency.
      :type include_data_consistency: bool
      :param collinearity_keys: List of features to use for collinearity.
      :type collinearity_keys: list
      :param primary_keys: List of features that correspond to the primary keys or item IDs for the given feature group, for Data Consistency analysis or Forecasting analysis respectively.
      :type primary_keys: list
      :param data_consistency_test_config: Test feature group version selection strategy for Data Consistency EDA type.
      :type data_consistency_test_config: dict
      :param data_consistency_reference_config: Reference feature group version selection strategy for Data Consistency EDA type.
      :type data_consistency_reference_config: dict
      :param feature_mappings: A JSON map to override features for the given feature_group, where keys are column names and the values are feature data use types. (In forecasting, used to set the timestamp column and target value)
      :type feature_mappings: dict
      :param forecast_frequency: The frequency of the data. It can be HOURLY, DAILY, WEEKLY, MONTHLY, QUARTERLY, or YEARLY.
      :type forecast_frequency: str

      :returns: The new EDA object that was created.
      :rtype: Eda
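
      A minimal usage sketch of a combined collinearity and data-consistency EDA, assuming an authenticated ``ApiClient``; the IDs and column names below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         eda = client.create_eda(
             project_id='proj_id',      # hypothetical project ID
             feature_group_id='fg_id',  # hypothetical feature group ID
             name='Sales Data EDA',
             include_collinearity=True,
             include_data_consistency=True,
             collinearity_keys=['price', 'discount'],  # hypothetical columns
             primary_keys=['order_id'],                # hypothetical primary key
             refresh_schedule='0 6 * * 1',             # rerun Mondays at 06:00 UTC
         )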



   .. py:method:: rerun_eda(eda_id)

      Reruns the specified EDA object.

      :param eda_id: Unique string identifier of the EDA object to rerun.
      :type eda_id: str

      :returns: The EDA object that is being rerun.
      :rtype: Eda



   .. py:method:: rename_eda(eda_id, name)

      Renames an EDA.

      :param eda_id: Unique string identifier of the EDA to rename.
      :type eda_id: str
      :param name: The new name to apply to the EDA.
      :type name: str



   .. py:method:: delete_eda(eda_id)

      Deletes the specified EDA and all its versions.

      :param eda_id: Unique string identifier of the EDA to delete.
      :type eda_id: str



   .. py:method:: delete_eda_version(eda_version)

      Deletes the specified EDA version.

      :param eda_version: Unique string identifier of the EDA version to delete.
      :type eda_version: str



   .. py:method:: create_holdout_analysis(name, model_id, feature_group_ids, model_version = None, algorithm = None)

      Create a holdout analysis for a model.

      :param name: Name of the holdout analysis
      :type name: str
      :param model_id: ID of the model to create a holdout analysis for
      :type model_id: str
      :param feature_group_ids: List of feature group IDs to use for the holdout analysis
      :type feature_group_ids: List
      :param model_version: (optional) Version of the model to use for the holdout analysis
      :type model_version: str
      :param algorithm: (optional) ID of algorithm to use for the holdout analysis
      :type algorithm: str

      :returns: The created holdout analysis
      :rtype: HoldoutAnalysis
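
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the model and feature group IDs below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         analysis = client.create_holdout_analysis(
             name='Q4 Holdout Analysis',
             model_id='model_id',                  # hypothetical model ID
             feature_group_ids=['holdout_fg_id'],  # hypothetical holdout feature group
         )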



   .. py:method:: rerun_holdout_analysis(holdout_analysis_id, model_version = None, algorithm = None)

      Reruns a holdout analysis. A different model version and algorithm can be specified, provided they belong to the same model.

      :param holdout_analysis_id: ID of the holdout analysis to rerun
      :type holdout_analysis_id: str
      :param model_version: (optional) Version of the model to use for the holdout analysis
      :type model_version: str
      :param algorithm: (optional) ID of algorithm to use for the holdout analysis
      :type algorithm: str

      :returns: The created holdout analysis version
      :rtype: HoldoutAnalysisVersion



   .. py:method:: create_monitor_alert(project_id, alert_name, condition_config, action_config, model_monitor_id = None, realtime_monitor_id = None)

      Creates a monitor alert for the given conditions and monitor. A monitor alert can be created for either a model monitor or a real-time monitor.

      :param project_id: Unique string identifier for the project.
      :type project_id: str
      :param alert_name: Name of the alert.
      :type alert_name: str
      :param condition_config: Condition to run the actions for the alert.
      :type condition_config: AlertConditionConfig
      :param action_config: Configuration for the action of the alert.
      :type action_config: AlertActionConfig
      :param model_monitor_id: Unique string identifier for the model monitor created under the project.
      :type model_monitor_id: str
      :param realtime_monitor_id: Unique string identifier for the real-time monitor for the deployment created under the project.
      :type realtime_monitor_id: str

      :returns: Object describing the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: update_monitor_alert(monitor_alert_id, alert_name = None, condition_config = None, action_config = None)

      Updates a monitor alert.

      :param monitor_alert_id: Unique identifier of the monitor alert.
      :type monitor_alert_id: str
      :param alert_name: Name of the alert.
      :type alert_name: str
      :param condition_config: Condition to run the actions for the alert.
      :type condition_config: AlertConditionConfig
      :param action_config: Configuration for the action of the alert.
      :type action_config: AlertActionConfig

      :returns: Object describing the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: run_monitor_alert(monitor_alert_id)

      Reruns a given monitor alert using the latest monitor instance.

      :param monitor_alert_id: Unique identifier of a monitor alert.
      :type monitor_alert_id: str

      :returns: Object describing the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: delete_monitor_alert(monitor_alert_id)

      Deletes a monitor alert.

      :param monitor_alert_id: The unique string identifier of the alert to delete.
      :type monitor_alert_id: str



   .. py:method:: create_prediction_operator(name, project_id, source_code = None, predict_function_name = None, initialize_function_name = None, feature_group_ids = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = False)

      Create a new prediction operator.

      :param name: Name of the prediction operator.
      :type name: str
      :param project_id: The unique ID of the associated project.
      :type project_id: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the function `predictFunctionName` and, if defined, the function `initializeFunctionName`.
      :type source_code: str
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions.
      :type predict_function_name: str
      :param initialize_function_name: Name of the optional initialize function found in the source code. This function will generate anything used by predictions, based on input feature groups.
      :type initialize_function_name: str
      :param feature_group_ids: List of feature groups that are supplied to the initialize function as parameters. Each parameter is a materialized DataFrame. The order should match the initialize function's parameters.
      :type feature_group_ids: List
      :param cpu_size: Size of the CPU for the prediction operator.
      :type cpu_size: str
      :param memory: Memory (in GB) for the prediction operator.
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: list
      :param use_gpu: Whether this prediction operator needs a GPU.
      :type use_gpu: bool

      :returns: The created prediction operator object.
      :rtype: PredictionOperator
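
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the source code, function names, project ID, and feature group ID below are illustrative, and the predict function signature shown is an assumption rather than a requirement of the API:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key

         source = '''
         def init(scores_df):
             # Build a lookup table from the materialized feature group DataFrame.
             return {'lookup': scores_df.set_index('item_id')['score'].to_dict()}

         def predict(state, query):
             # Return a stored score for the requested item, defaulting to 0.0.
             return {'score': state['lookup'].get(query.get('item_id'), 0.0)}
         '''

         operator = client.create_prediction_operator(
             name='Item Score Lookup',
             project_id='proj_id',                # hypothetical project ID
             source_code=source,
             initialize_function_name='init',
             predict_function_name='predict',
             feature_group_ids=['scores_fg_id'],  # hypothetical feature group ID
             memory=16,
             package_requirements=['pandas>=1.4.0'],
         )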



   .. py:method:: update_prediction_operator(prediction_operator_id, name = None, feature_group_ids = None, source_code = None, initialize_function_name = None, predict_function_name = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None)

      Update an existing prediction operator. This does not create a new version.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str
      :param name: Name of the prediction operator.
      :type name: str
      :param feature_group_ids: List of feature groups that are supplied to the initialize function as parameters. Each parameter is a materialized DataFrame. The order should match the initialize function's parameters.
      :type feature_group_ids: List
      :param source_code: Contents of a valid Python source code file. The source code should contain the function `predictFunctionName` and, if defined, the function `initializeFunctionName`.
      :type source_code: str
      :param initialize_function_name: Name of the optional initialize function found in the source code. This function will generate anything used by predictions, based on input feature groups.
      :type initialize_function_name: str
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions.
      :type predict_function_name: str
      :param cpu_size: Size of the CPU for the prediction operator.
      :type cpu_size: str
      :param memory: Memory (in GB) for the prediction operator.
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: list
      :param use_gpu: Whether this prediction operator needs a GPU.
      :type use_gpu: bool

      :returns: The updated prediction operator object.
      :rtype: PredictionOperator



   .. py:method:: delete_prediction_operator(prediction_operator_id)

      Delete an existing prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str



   .. py:method:: deploy_prediction_operator(prediction_operator_id, auto_deploy = True)

      Deploy the prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str
      :param auto_deploy: Flag to enable the automatic deployment when a new prediction operator version is created.
      :type auto_deploy: bool

      :returns: The created deployment object.
      :rtype: Deployment



   .. py:method:: create_prediction_operator_version(prediction_operator_id)

      Create a new version of the prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str

      :returns: The created prediction operator version object.
      :rtype: PredictionOperatorVersion



   .. py:method:: delete_prediction_operator_version(prediction_operator_version)

      Delete a prediction operator version.

      :param prediction_operator_version: The unique ID of the prediction operator version.
      :type prediction_operator_version: str



   .. py:method:: create_deployment(name = None, model_id = None, model_version = None, algorithm = None, feature_group_id = None, project_id = None, description = None, calls_per_second = None, auto_deploy = True, start = True, enable_batch_streaming_updates = False, skip_metrics_check = False, model_deployment_config = None)

      Creates a deployment with the specified name and description for the specified model or feature group.

      A Deployment makes the trained model or feature group available for prediction requests.


      :param name: The name of the deployment.
      :type name: str
      :param model_id: The unique ID associated with the model.
      :type model_id: str
      :param model_version: The unique ID associated with the model version to deploy.
      :type model_version: str
      :param algorithm: The unique ID associated with the algorithm to deploy.
      :type algorithm: str
      :param feature_group_id: The unique ID associated with a feature group.
      :type feature_group_id: str
      :param project_id: The unique ID associated with a project.
      :type project_id: str
      :param description: The description for the deployment.
      :type description: str
      :param calls_per_second: The number of calls per second the deployment can handle.
      :type calls_per_second: int
      :param auto_deploy: Flag to enable the automatic deployment when a new Model Version finishes training.
      :type auto_deploy: bool
      :param start: If true, the deployment will be started; otherwise, it will be created offline.
      :type start: bool
      :param enable_batch_streaming_updates: Flag to enable marking the feature group deployment to have a background process cache streamed in rows for quicker lookup.
      :type enable_batch_streaming_updates: bool
      :param skip_metrics_check: Flag to skip the metric regression check for this deployment.
      :type skip_metrics_check: bool
      :param model_deployment_config: The deployment configuration for the model to deploy.
      :type model_deployment_config: dict

      :returns: The new model or feature group deployment.
      :rtype: Deployment
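
      A minimal usage sketch that deploys the latest version of a model, assuming an authenticated ``ApiClient``; the model ID below is hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         deployment = client.create_deployment(
             name='Churn Model Deployment',
             model_id='model_id',  # hypothetical model ID
             description='Serves real-time churn predictions',
             auto_deploy=True,
             start=True,
         )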



   .. py:method:: create_deployment_token(project_id, name = None)

      Creates a deployment token for the specified project.

      Deployment tokens are used to authenticate requests to the prediction APIs and are scoped to the project level.


      :param project_id: The unique string identifier associated with the project.
      :type project_id: str
      :param name: The name of the deployment token.
      :type name: str

      :returns: The deployment token.
      :rtype: DeploymentAuthToken



   .. py:method:: update_deployment(deployment_id, description = None, auto_deploy = None, skip_metrics_check = None)

      Updates a deployment's properties.

      :param deployment_id: Unique identifier of the deployment to update.
      :type deployment_id: str
      :param description: The new description for the deployment.
      :type description: str
      :param auto_deploy: Flag to enable the automatic deployment when a new Model Version finishes training.
      :type auto_deploy: bool
      :param skip_metrics_check: Flag to skip the metric regression check for this deployment. This field is only relevant when auto_deploy is enabled.
      :type skip_metrics_check: bool



   .. py:method:: rename_deployment(deployment_id, name)

      Updates a deployment's name.

      :param deployment_id: Unique string identifier for the deployment to update.
      :type deployment_id: str
      :param name: The new deployment name.
      :type name: str



   .. py:method:: set_auto_deployment(deployment_id, enable = None)

      Enable or disable auto deployment for the specified deployment.

      When a model is scheduled to retrain, deployments with auto deployment enabled will be marked to automatically promote the new model version. After the newly trained model completes, a check on its metrics in comparison to the currently deployed model version will be performed. If the metrics are comparable or better, the newly trained model version is automatically promoted. If not, it will be marked as a failed model version promotion with an error indicating poor metrics performance.


      :param deployment_id: The unique ID associated with the deployment.
      :type deployment_id: str
      :param enable: Enable or disable the autoDeploy property of the deployment.
      :type enable: bool



   .. py:method:: set_deployment_model_version(deployment_id, model_version, algorithm = None, model_deployment_config = None)

      Promotes a model version and/or algorithm to be the active served deployment version.

      :param deployment_id: A unique identifier for the deployment.
      :type deployment_id: str
      :param model_version: A unique identifier for the model version.
      :type model_version: str
      :param algorithm: The algorithm to use for the model version. If not specified, the algorithm will be inferred from the model version.
      :type algorithm: str
      :param model_deployment_config: The deployment configuration for the model to deploy.
      :type model_deployment_config: dict



   .. py:method:: set_deployment_feature_group_version(deployment_id, feature_group_version)

      Promotes a feature group version to be served in the deployment.

      :param deployment_id: Unique string identifier for the deployment.
      :type deployment_id: str
      :param feature_group_version: Unique string identifier for the feature group version.
      :type feature_group_version: str



   .. py:method:: set_deployment_prediction_operator_version(deployment_id, prediction_operator_version)

      Promotes a prediction operator version to be served in the deployment.

      :param deployment_id: Unique string identifier for the deployment.
      :type deployment_id: str
      :param prediction_operator_version: Unique string identifier for the prediction operator version.
      :type prediction_operator_version: str



   .. py:method:: start_deployment(deployment_id)

      Restarts the specified deployment that was previously suspended.

      :param deployment_id: A unique string identifier associated with the deployment.
      :type deployment_id: str



   .. py:method:: stop_deployment(deployment_id)

      Stops the specified deployment.

      :param deployment_id: Unique string identifier of the deployment to be stopped.
      :type deployment_id: str



   .. py:method:: delete_deployment(deployment_id)

      Deletes the specified deployment. The deployment's models will not be affected. Note that the deployments are not recoverable after they are deleted.

      :param deployment_id: Unique string identifier of the deployment to delete.
      :type deployment_id: str



   .. py:method:: delete_deployment_token(deployment_token)

      Deletes the specified deployment token.

      :param deployment_token: The deployment token to delete.
      :type deployment_token: str



   .. py:method:: set_deployment_feature_group_export_file_connector_output(deployment_id, file_format = None, output_location = None)

      Sets the export output for the Feature Group Deployment to be a file connector.

      :param deployment_id: The ID of the deployment for which the export type is set.
      :type deployment_id: str
      :param file_format: The type of export output, either CSV or JSON.
      :type file_format: str
      :param output_location: The file connector (cloud) location where the output should be exported.
      :type output_location: str



   .. py:method:: set_deployment_feature_group_export_database_connector_output(deployment_id, database_connector_id, object_name, write_mode, database_feature_mapping, id_column = None, additional_id_columns = None)

      Sets the export output for the Feature Group Deployment to a Database connector.

      :param deployment_id: The ID of the deployment for which the export type is set.
      :type deployment_id: str
      :param database_connector_id: The unique string identifier of the database connector used.
      :type database_connector_id: str
      :param object_name: The object of the database connector to write to.
      :type object_name: str
      :param write_mode: The write mode to use when writing to the database connector, either UPSERT or INSERT.
      :type write_mode: str
      :param database_feature_mapping: The column/feature pairs mapping the features to the database columns.
      :type database_feature_mapping: dict
      :param id_column: The id column to use as the upsert key.
      :type id_column: str
      :param additional_id_columns: For database connectors which support it, a list of additional ID columns to use as a complex key for upserting.
      :type additional_id_columns: list
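
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the deployment ID, connector ID, table name, and column mapping below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         client.set_deployment_feature_group_export_database_connector_output(
             deployment_id='dep_id',                   # hypothetical deployment ID
             database_connector_id='db_connector_id',  # hypothetical connector ID
             object_name='analytics.user_features',    # hypothetical target table
             write_mode='UPSERT',
             database_feature_mapping={'user_id': 'USER_ID', 'score': 'SCORE'},
             id_column='USER_ID',
         )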



   .. py:method:: remove_deployment_feature_group_export_output(deployment_id)

      Removes the export type that is set for the Feature Group Deployment.

      :param deployment_id: The ID of the deployment for which the export type is set.
      :type deployment_id: str



   .. py:method:: set_default_prediction_arguments(deployment_id, prediction_arguments, set_as_override = False)

      Sets the deployment config.

      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param prediction_arguments: The prediction arguments to set.
      :type prediction_arguments: PredictionArguments
      :param set_as_override: If True, use these arguments as overrides instead of defaults for predict calls.
      :type set_as_override: bool

      :returns: Description of the updated deployment.
      :rtype: Deployment



   .. py:method:: create_deployment_alert(deployment_id, alert_name, condition_config, action_config)

      Create a deployment alert for the given conditions.

      Currently, only batch prediction usage is supported.


      :param deployment_id: Unique string identifier for the deployment.
      :type deployment_id: str
      :param alert_name: Name of the alert.
      :type alert_name: str
      :param condition_config: Condition to run the actions for the alert.
      :type condition_config: AlertConditionConfig
      :param action_config: Configuration for the action of the alert.
      :type action_config: AlertActionConfig

      :returns: Object describing the deployment alert.
      :rtype: MonitorAlert



   .. py:method:: create_realtime_monitor(deployment_id, realtime_monitor_schedule = None, lookback_time = None)

      Creates a real-time monitor. Real-time monitors compute and monitor metrics of real-time prediction data.

      :param deployment_id: Unique string identifier for the deployment.
      :type deployment_id: str
      :param realtime_monitor_schedule: The cron expression for triggering the monitor.
      :type realtime_monitor_schedule: str
      :param lookback_time: Lookback time (in seconds) for each monitor trigger.
      :type lookback_time: int

      :returns: Object describing the real-time monitor.
      :rtype: RealtimeMonitor
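
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the deployment ID below is hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         monitor = client.create_realtime_monitor(
             deployment_id='dep_id',                 # hypothetical deployment ID
             realtime_monitor_schedule='0 * * * *',  # trigger hourly
             lookback_time=3600,                     # look back over the previous hour of predictions
         )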



   .. py:method:: update_realtime_monitor(realtime_monitor_id, realtime_monitor_schedule = None, lookback_time = None)

      Update the real-time monitor associated with the real-time monitor id.

      :param realtime_monitor_id: Unique string identifier for the real-time monitor.
      :type realtime_monitor_id: str
      :param realtime_monitor_schedule: The cron expression for triggering the monitor.
      :type realtime_monitor_schedule: str
      :param lookback_time: Lookback time (in seconds) for each monitor trigger.
      :type lookback_time: float

      :returns: Object describing the realtime monitor.
      :rtype: RealtimeMonitor



   .. py:method:: delete_realtime_monitor(realtime_monitor_id)

      Delete the real-time monitor associated with the real-time monitor id.

      :param realtime_monitor_id: Unique string identifier for the real-time monitor.
      :type realtime_monitor_id: str



   .. py:method:: create_refresh_policy(name, cron, refresh_type, project_id = None, dataset_ids = [], feature_group_id = None, model_ids = [], deployment_ids = [], batch_prediction_ids = [], model_monitor_ids = [], notebook_id = None, prediction_operator_id = None, feature_group_export_config = None)

      Creates a refresh policy with a particular cron pattern and refresh type. The cron is specified in UTC time.

      A refresh policy allows for the scheduling of a set of actions at regular intervals. This can be useful for periodically updating data that needs to be re-imported into the project for retraining.


      :param name: The name of the refresh policy.
      :type name: str
      :param cron: A cron-like string specifying the frequency of the refresh policy in UTC time.
      :type cron: str
      :param refresh_type: The refresh type used to determine what is being refreshed, such as a single dataset, dataset and model, or more.
      :type refresh_type: str
      :param project_id: Optionally, a project ID can be specified so that all datasets, models, deployments, batch predictions, prediction metrics, model monitors, and notebooks are captured at the instant the policy is created.
      :type project_id: str
      :param dataset_ids: Comma-separated list of dataset IDs.
      :type dataset_ids: List
      :param feature_group_id: Feature Group ID associated with refresh policy.
      :type feature_group_id: str
      :param model_ids: Comma-separated list of model IDs.
      :type model_ids: List
      :param deployment_ids: Comma-separated list of deployment IDs.
      :type deployment_ids: List
      :param batch_prediction_ids: Comma-separated list of batch prediction IDs.
      :type batch_prediction_ids: List
      :param model_monitor_ids: Comma-separated list of model monitor IDs.
      :type model_monitor_ids: List
      :param notebook_id: Notebook ID associated with refresh policy.
      :type notebook_id: str
      :param prediction_operator_id: Prediction Operator ID associated with refresh policy.
      :type prediction_operator_id: str
      :param feature_group_export_config: Feature group export configuration.
      :type feature_group_export_config: FeatureGroupExportConfig

      :returns: The created refresh policy.
      :rtype: RefreshPolicy
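
      A minimal usage sketch of a nightly dataset refresh, assuming an authenticated ``ApiClient``; the dataset ID is hypothetical and the refresh type value shown is illustrative:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         policy = client.create_refresh_policy(
             name='Nightly Dataset Refresh',
             cron='0 2 * * *',            # 02:00 UTC every day
             refresh_type='DATASET',      # illustrative refresh type value
             dataset_ids=['dataset_id'],  # hypothetical dataset ID
         )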



   .. py:method:: delete_refresh_policy(refresh_policy_id)

      Delete a refresh policy.

      :param refresh_policy_id: Unique string identifier associated with the refresh policy to delete.
      :type refresh_policy_id: str



   .. py:method:: pause_refresh_policy(refresh_policy_id)

      Pauses a refresh policy.

      :param refresh_policy_id: Unique identifier associated with the refresh policy to be paused.
      :type refresh_policy_id: str



   .. py:method:: resume_refresh_policy(refresh_policy_id)

      Resumes a refresh policy.

      :param refresh_policy_id: The unique ID associated with this refresh policy.
      :type refresh_policy_id: str



   .. py:method:: run_refresh_policy(refresh_policy_id)

      Force a run of the refresh policy.

      :param refresh_policy_id: Unique string identifier associated with the refresh policy to be run.
      :type refresh_policy_id: str



   .. py:method:: update_refresh_policy(refresh_policy_id, name = None, cron = None, feature_group_export_config = None)

      Updates the name or cron string of a refresh policy.

      :param refresh_policy_id: Unique string identifier associated with the refresh policy.
      :type refresh_policy_id: str
      :param name: Name of the refresh policy to be updated.
      :type name: str
      :param cron: Cron string describing the schedule of the refresh policy to be updated.
      :type cron: str
      :param feature_group_export_config: Feature group export configuration to update a feature group refresh policy.
      :type feature_group_export_config: FeatureGroupExportConfig

      :returns: Updated refresh policy.
      :rtype: RefreshPolicy



   .. py:method:: lookup_features(deployment_token, deployment_id, query_data, limit_results = None, result_columns = None)

      Returns the feature group deployed in the feature store project.

      :param deployment_token: A deployment token used to authenticate access to created deployments. This token only authorizes predictions on deployments in this project, so it can be safely embedded inside an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the key is the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the value is the unique value of the same entity.
      :type query_data: dict
      :param limit_results: If provided, will limit the number of results to the value specified.
      :type limit_results: int
      :param result_columns: If provided, will limit the columns present in each result to the columns specified in this list.
      :type result_columns: list
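
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the deployment token, deployment ID, and column names below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         rows = client.lookup_features(
             deployment_token='deployment_token',  # hypothetical deployment token
             deployment_id='dep_id',               # hypothetical deployment ID
             query_data={'user_id': 'u_123'},      # hypothetical column mapped to USER_ID
             limit_results=5,
             result_columns=['user_id', 'lifetime_value'],
         )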



   .. py:method:: predict(deployment_token, deployment_id, query_data, **kwargs)

      Returns a prediction for Predictive Modeling.

      :param deployment_token: A deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, and is safe to embed in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the key is the column name (e.g. a column with name 'user_id' in the dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed, and the value is the unique value of the same entity.
      :type query_data: dict
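
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the deployment token, deployment ID, and column name below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         prediction = client.predict(
             deployment_token='deployment_token',  # hypothetical deployment token
             deployment_id='dep_id',               # hypothetical deployment ID
             query_data={'user_id': 'u_123'},      # hypothetical column mapped to USER_ID
         )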



   .. py:method:: predict_multiple(deployment_token, deployment_id, query_data)

      Returns a list of predictions for predictive modeling.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, and is safe to embed in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A list of dictionaries, where the 'key' is the column name (e.g. a column with name 'user_id' in the dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed, and the 'value' is the unique value of the same entity.
      :type query_data: list



   .. py:method:: predict_from_datasets(deployment_token, deployment_id, query_data)

      Returns a list of predictions for Predictive Modeling.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the 'key' is the source dataset name, and the 'value' is a list of records corresponding to the dataset rows.
      :type query_data: dict



   .. py:method:: predict_lead(deployment_token, deployment_id, query_data, explain_predictions = False, explainer_type = None)

      Returns the probability of a user being a lead based on their interaction with the service/product and their own attributes (e.g. income, assets, credit score, etc.). Note that the inputs to this method, wherever applicable, should be the column names in the dataset mapped to the column mappings in our system (e.g. column 'user_id' mapped to mapping 'LEAD_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary containing user attributes and/or user's interaction data with the product/service (e.g. number of clicks, items in cart, etc.).
      :type query_data: dict
      :param explain_predictions: If True, explanations will be provided for the lead predictions.
      :type explain_predictions: bool
      :param explainer_type: Type of explainer to use for explanations.
      :type explainer_type: str



   .. py:method:: predict_churn(deployment_token, deployment_id, query_data, explain_predictions = False, explainer_type = None)

      Returns the probability that a user will churn based on their interactions with the item/product/service. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'churn_result' mapped to mapping 'CHURNED_YN' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where the 'key' will be the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the 'value' will be the unique value of the same entity.
      :type query_data: dict
      :param explain_predictions: If True, explanations will be provided for the churn predictions.
      :type explain_predictions: bool
      :param explainer_type: Type of explainer to use for explanations.
      :type explainer_type: str



   .. py:method:: predict_takeover(deployment_token, deployment_id, query_data)

      Returns a probability for each class label associated with the types of fraud or a 'yes' or 'no' type label for the possibility of fraud. Note that the inputs to this method, wherever applicable, will be the column names in the dataset mapped to the column mappings in our system (e.g., column 'account_name' mapped to mapping 'ACCOUNT_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary containing account activity characteristics (e.g., login id, login duration, login type, IP address, etc.).
      :type query_data: dict



   .. py:method:: predict_fraud(deployment_token, deployment_id, query_data)

      Returns the probability of a transaction performed under a specific account being fraudulent or not. Note that the inputs to this method, wherever applicable, should be the column names in your dataset mapped to the column mappings in our system (e.g. column 'account_number' mapped to the mapping 'ACCOUNT_ID' in our system).

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary containing transaction attributes (e.g. credit card type, transaction location, transaction amount, etc.).
      :type query_data: dict



   .. py:method:: predict_class(deployment_token, deployment_id, query_data, threshold = None, threshold_class = None, thresholds = None, explain_predictions = False, fixed_features = None, nested = None, explainer_type = None)

      Returns a classification prediction.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the 'Key' is the column name (e.g. a column with the name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the 'Value' is the unique value of the same entity.
      :type query_data: dict
      :param threshold: A float value that is applied on the popular class label.
      :type threshold: float
      :param threshold_class: The label upon which the threshold is added (binary labels only).
      :type threshold_class: str
      :param thresholds: Maps labels to thresholds (multi-label classification only). Defaults to F1 optimal threshold if computed for the given class, else uses 0.5.
      :type thresholds: Dict
      :param explain_predictions: If True, returns the SHAP explanations for all input features.
      :type explain_predictions: bool
      :param fixed_features: A set of input features to treat as constant for explanations. Only honored when the explainer type is KERNEL_EXPLAINER.
      :type fixed_features: list
      :param nested: If specified, generates a prediction delta for each index of the specified nested feature.
      :type nested: str
      :param explainer_type: The type of explainer to use.
      :type explainer_type: str
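
      A minimal usage sketch of a thresholded classification call with explanations, assuming an authenticated ``ApiClient``; the deployment token, deployment ID, column name, and class label below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         result = client.predict_class(
             deployment_token='deployment_token',  # hypothetical deployment token
             deployment_id='dep_id',               # hypothetical deployment ID
             query_data={'user_id': 'u_123'},      # hypothetical column mapped to USER_ID
             threshold=0.7,
             threshold_class='churned',            # hypothetical positive class label
             explain_predictions=True,
         )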



   .. py:method:: predict_target(deployment_token, deployment_id, query_data, explain_predictions = False, fixed_features = None, nested = None, explainer_type = None)

      Returns a prediction from a classification or regression model. Optionally, includes explanations.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the 'key' is the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the 'value' is the unique value of the same entity.
      :type query_data: dict
      :param explain_predictions: If true, returns the SHAP explanations for all input features.
      :type explain_predictions: bool
      :param fixed_features: Set of input features to treat as constant for explanations. Only honored when the explainer type is KERNEL_EXPLAINER.
      :type fixed_features: list
      :param nested: If specified, generates prediction delta for each index of the specified nested feature.
      :type nested: str
      :param explainer_type: The type of explainer to use.
      :type explainer_type: str



   .. py:method:: get_anomalies(deployment_token, deployment_id, threshold = None, histogram = False)

      Returns a list of anomalies from the training dataset.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param threshold: The threshold score of what is an anomaly. Valid values are between 0.8 and 0.99.
      :type threshold: float
      :param histogram: If True, will return a histogram of the distribution of all points.
      :type histogram: bool



   .. py:method:: get_timeseries_anomalies(deployment_token, deployment_id, start_timestamp = None, end_timestamp = None, query_data = None, get_all_item_data = False, series_ids = None)

      Returns a list of anomalous timestamps from the training dataset.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param start_timestamp: Timestamp from which anomalies are to be detected in the training data.
      :type start_timestamp: str
      :param end_timestamp: Timestamp up to which anomalies are to be detected in the training data.
      :type end_timestamp: str
      :param query_data: Additional data on which anomaly detection is to be performed. It can be a single record, a list of records, or a JSON string representing a list of records.
      :type query_data: dict
      :param get_all_item_data: Set this to True if anomaly detection is to be performed on all the data related to the input IDs.
      :type get_all_item_data: bool
      :param series_ids: List of series IDs on which anomaly detection is to be performed.
      :type series_ids: List



   .. py:method:: is_anomaly(deployment_token, deployment_id, query_data = None)

      Returns a list of anomaly attributes based on login information for a specified account. Note that the inputs to this method, wherever applicable, should be the column names in the dataset mapped to the column mappings in our system (e.g. column 'account_name' mapped to mapping 'ACCOUNT_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: The input data for the prediction.
      :type query_data: dict



   .. py:method:: get_event_anomaly_score(deployment_token, deployment_id, query_data = None)

      Returns an anomaly score for an event.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: The input data for the prediction.
      :type query_data: dict



   .. py:method:: get_forecast(deployment_token, deployment_id, query_data, future_data = None, num_predictions = None, prediction_start = None, explain_predictions = False, explainer_type = None, get_item_data = False)

      Returns a list of forecasts for a given entity under the specified project deployment. Note that the inputs to the deployed model will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'holiday_yn' mapped to mapping 'FUTURE' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where 'Key' will be the column name (e.g. a column with name 'store_id' in your dataset) mapped to the column mapping ITEM_ID that uniquely identifies the entity against which forecasting is performed and 'Value' will be the unique value of the same entity.
      :type query_data: dict
      :param future_data: This will be a list of values known ahead of time that are relevant for forecasting (e.g. State Holidays, National Holidays, etc.). Each element is a dictionary, where the key and the value both will be of type 'str'. For example future data entered for a Store may be [{"Holiday":"No", "Promo":"Yes", "Date": "2015-07-31 00:00:00"}].
      :type future_data: list
      :param num_predictions: The number of timestamps to predict in the future.
      :type num_predictions: int
      :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for midnight of 2015-08-01).
      :type prediction_start: str
      :param explain_predictions: If True, explanations will be provided for the forecasts.
      :type explain_predictions: bool
      :param explainer_type: Type of explainer to use for explanations.
      :type explainer_type: str
      :param get_item_data: If True, the data corresponding to the items in the query will be returned.
      :type get_item_data: bool
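
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the deployment token, deployment ID, item column, and future covariates below are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         forecast = client.get_forecast(
             deployment_token='deployment_token',  # hypothetical deployment token
             deployment_id='dep_id',               # hypothetical deployment ID
             query_data={'store_id': 'store_42'},  # hypothetical column mapped to ITEM_ID
             future_data=[{'Holiday': 'No', 'Promo': 'Yes', 'Date': '2015-07-31 00:00:00'}],
             num_predictions=14,
             prediction_start='2015-08-01T00:00:00',
         )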



   .. py:method:: get_k_nearest(deployment_token, deployment_id, vector, k = None, distance = None, include_score = False, catalog_id = None)

      Returns the k nearest neighbors for the provided embedding vector.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param vector: Input vector to perform the k nearest neighbors with.
      :type vector: list
      :param k: Overridable number of items to return.
      :type k: int
      :param distance: Specify the distance function to use. Options include 'dot', 'cosine', 'euclidean', and 'manhattan'. Defaults to 'dot'.
      :type distance: str
      :param include_score: If True, will return the score alongside the resulting embedding value.
      :type include_score: bool
      :param catalog_id: An optional parameter honored only for embeddings that provide a catalog ID.
      :type catalog_id: str
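
      A minimal usage sketch, assuming an authenticated ``ApiClient``; the deployment token, deployment ID, and embedding vector below are hypothetical, and the vector must match the dimensionality expected by the deployment:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # hypothetical API key
         neighbors = client.get_k_nearest(
             deployment_token='deployment_token',  # hypothetical deployment token
             deployment_id='dep_id',               # hypothetical deployment ID
             vector=[0.12, -0.40, 0.88, 0.05],     # hypothetical embedding vector
             k=10,
             distance='cosine',
             include_score=True,
         )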



   .. py:method:: get_multiple_k_nearest(deployment_token, deployment_id, queries)

      Returns the k nearest neighbors for the queries provided.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param queries: List of mappings of format {"catalogId": "cat0", "vectors": [...], "k": 20, "distance": "euclidean"}. See `getKNearest` for additional information about the supported parameters.
      :type queries: list



   .. py:method:: get_labels(deployment_token, deployment_id, query_data, return_extracted_entities = False)

      Returns a list of scored labels for a document.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: Dictionary where key is "Content" and value is the text from which entities are to be extracted.
      :type query_data: dict
      :param return_extracted_entities: (Optional) If True, will return the extracted entities in a simpler format.
      :type return_extracted_entities: bool



   .. py:method:: get_entities_from_pdf(deployment_token, deployment_id, pdf = None, doc_id = None, return_extracted_features = False, verbose = False, save_extracted_features = None)

      Extracts text from the provided PDF and returns a list of recognized labels and their scores.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param pdf: (Optional) The pdf to predict on. One of pdf or docId must be specified.
      :type pdf: io.TextIOBase
      :param doc_id: (Optional) The ID of the document to predict on. One of pdf or docId must be specified.
      :type doc_id: str
      :param return_extracted_features: (Optional) If True, will return all extracted features (e.g. all tokens in a page) from the PDF. Default is False.
      :type return_extracted_features: bool
      :param verbose: (Optional) If True, will return all the extracted tokens probabilities for all the trained labels. Default is False.
      :type verbose: bool
      :param save_extracted_features: (Optional) If True, will save extracted features (i.e. page tokens) so that they can be fetched using the prediction docId. Default is False.
      :type save_extracted_features: bool



   .. py:method:: get_recommendations(deployment_token, deployment_id, query_data, num_items = None, page = None, exclude_item_ids = None, score_field = None, scaling_factors = None, restrict_items = None, exclude_items = None, explore_fraction = None, diversity_attribute_name = None, diversity_max_results_per_value = None)

      Returns a list of recommendations for a given user under the specified project deployment. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'time' mapped to mapping 'TIMESTAMP' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where 'Key' will be the column name (e.g. a column with name 'user_name' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the user against which recommendations are made and 'Value' will be the unique value of the same item. For example, if you have the column name 'user_name' mapped to the column mapping 'USER_ID', then the query must have the exact same column name (user_name) as key and the name of the user (John Doe) as value.
      :type query_data: dict
      :param num_items: The number of items to recommend on one page. By default, it is set to 50 items per page.
      :type num_items: int
      :param page: The page number to be displayed. For example, let's say that the num_items is set to 10 with the total recommendations list size of 50 recommended items, then an input value of 2 in the 'page' variable will display a list of items that rank from 11th to 20th.
      :type page: int
      :param score_field: The relative item scores are returned in a separate field named with the same name as the key (score_field) for this argument.
      :type score_field: str
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key "column" takes the name of the column, "col0"; the key "values" takes the list of items, ["value0", "value1"], in reference to which the model recommendations need to be biased; and the key "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there's a type of item that might be less popular but you want to promote it, or there's an item that always comes up and you want to demote it.
      :type scaling_factors: list
      :param restrict_items: It allows you to restrict the recommendations to certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", "value3", ...]}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1", "value3", ...]" to which to restrict the recommendations to. Let's take an example where the input to restrict_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. This input will restrict the recommendations to SUVs and Sedans. This type of restriction is particularly useful if there's a list of items that you know is of use in some particular scenario and you want to restrict the recommendations only to that list.
      :type restrict_items: list
      :param exclude_items: It allows you to exclude certain items from the list of recommendations. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", ...]}. The key "column" takes the name of the column, "col0"; the key "values" takes the list of items, ["value0", "value1"], to exclude from the recommendations. Let's take an example where the input to exclude_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. The resulting recommendation list will exclude all SUVs and Sedans.
      :type exclude_items: list
      :param explore_fraction: Explore fraction.
      :type explore_fraction: float
      :param diversity_attribute_name: Item attribute column name which is used to ensure diversity of prediction results.
      :type diversity_attribute_name: str
      :param diversity_max_results_per_value: Maximum number of results per value of diversity_attribute_name.
      :type diversity_max_results_per_value: int



   .. py:method:: get_personalized_ranking(deployment_token, deployment_id, query_data, preserve_ranks = None, preserve_unknown_items = False, scaling_factors = None)

      Returns a list of items with personalized promotions for a given user under the specified project deployment. Note that the inputs to this method, wherever applicable, should be the column names in the dataset mapped to the column mappings in our system (e.g. column 'item_code' mapped to mapping 'ITEM_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This should be a dictionary with two key-value pairs. The first pair has a 'Key' which is the column name (e.g. a column with name 'user_id' in the dataset) mapped to the column mapping USER_ID that uniquely identifies the user against whom a prediction is made, and a 'Value' which is the identifier value for that user. The second pair has a 'Key' which is the name of the column (e.g. movie_name) mapped to ITEM_ID (unique item identifier) and a 'Value' which is a list of identifiers that uniquely identify those items.
      :type query_data: dict
      :param preserve_ranks: List of dictionaries of format {"column": "col0", "values": ["value0", "value1"]}, where the ranks of items in query_data are preserved for all the items in "col0" with values "value0" and "value1". This option is useful when the desired items are being recommended in the desired order and the ranks for those items need to be kept unchanged during recommendation generation.
      :type preserve_ranks: list
      :param preserve_unknown_items: If true, any items that are unknown to the model will not be reranked, and their original position in the query will be preserved.
      :type preserve_unknown_items: bool
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" in reference to which the model recommendations need to be biased; and the key, "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there's a type of item that might be less popular but you want to promote it or there's an item that always comes up and you want to demote it.
      :type scaling_factors: list
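
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the token, id, and the column names ('user_id', 'movie_name', 'genre') and item ids shown are illustrative placeholders:

      .. code-block:: python

         result = client.get_personalized_ranking(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             query_data={
                 'user_id': 'u_123',                    # column mapped to USER_ID
                 'movie_name': ['m_1', 'm_2', 'm_3'],   # column mapped to ITEM_ID
             },
             preserve_ranks=[{'column': 'movie_name', 'values': ['m_1']}],
             scaling_factors=[{'column': 'genre', 'values': ['comedy'], 'factor': 1.2}],
         )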



   .. py:method:: get_ranked_items(deployment_token, deployment_id, query_data, preserve_ranks = None, preserve_unknown_items = False, score_field = None, scaling_factors = None, diversity_attribute_name = None, diversity_max_results_per_value = None)

      Returns a list of re-ranked items for a selected user when a list of items is required to be reranked according to the user's preferences. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'item_code' mapped to mapping 'ITEM_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary with two key-value pairs. The first pair has a 'Key' which is the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the user against whom a prediction is made, and a 'Value' which is the identifier value for that user. The second pair has a 'Key' which is the name of the column (e.g. movie_name) mapped to ITEM_ID (unique item identifier) and a 'Value' which is a list of identifiers that uniquely identify those items.
      :type query_data: dict
      :param preserve_ranks: List of dictionaries of format {"column": "col0", "values": ["value0", "value1"]}, where the ranks of items in query_data are preserved for all the items in "col0" with values "value0" and "value1". This option is useful when the desired items are being recommended in the desired order and the ranks for those items need to be kept unchanged during recommendation generation.
      :type preserve_ranks: list
      :param preserve_unknown_items: If true, any items that are unknown to the model will not be reranked, and their original position in the query will be preserved.
      :type preserve_unknown_items: bool
      :param score_field: If provided, the relative item scores are returned in a separate field whose name is the value passed for this argument.
      :type score_field: str
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" in reference to which the model recommendations need to be biased; and the key, "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there is a type of item that might be less popular but you want to promote it or there is an item that always comes up and you want to demote it.
      :type scaling_factors: list
      :param diversity_attribute_name: item attribute column name which is used to ensure diversity of prediction results.
      :type diversity_attribute_name: str
      :param diversity_max_results_per_value: maximum number of results per value of diversity_attribute_name.
      :type diversity_max_results_per_value: int



   .. py:method:: get_related_items(deployment_token, deployment_id, query_data, num_items = None, page = None, scaling_factors = None, restrict_items = None, exclude_items = None)

      Returns a list of related items for a given item under the specified project deployment. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'item_code' mapped to mapping 'ITEM_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where the 'key' will be the column name (e.g. a column with name 'user_name' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the user against which related items are determined and the 'value' will be the unique value of the same item. For example, if you have the column name 'user_name' mapped to the column mapping 'USER_ID', then the query must have the exact same column name (user_name) as key and the name of the user (John Doe) as value.
      :type query_data: dict
      :param num_items: The number of items to recommend on one page. By default, it is set to 50 items per page.
      :type num_items: int
      :param page: The page number to be displayed. For example, let's say that the num_items is set to 10 with the total recommendations list size of 50 recommended items, then an input value of 2 in the 'page' variable will display a list of items that rank from 11th to 20th.
      :type page: int
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" in reference to which the model recommendations need to be biased; and the key, "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there's a type of item that might be less popular but you want to promote it or there's an item that always comes up and you want to demote it.
      :type scaling_factors: list
      :param restrict_items: It allows you to restrict the recommendations to certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", "value3", ...]}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1", "value3", ...]" to which the recommendations will be restricted. Let's take an example where the input to restrict_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. This input will restrict the recommendations to SUVs and Sedans. This type of restriction is particularly useful if there's a list of items that you know is of use in some particular scenario and you want to restrict the recommendations only to that list.
      :type restrict_items: list
      :param exclude_items: It allows you to exclude certain items from the list of recommendations. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", ...]}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" to exclude from the recommendations. Let's take an example where the input to exclude_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. The resulting recommendation list will exclude all SUVs and Sedans. This is particularly useful if there's a list of items that you know is of no use in some particular scenario and you don't want to show those items present in that list.
      :type exclude_items: list
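
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the token, id, the 'item_code' column, and the 'VehicleType' column with its values are illustrative placeholders taken from the examples above:

      .. code-block:: python

         related = client.get_related_items(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             query_data={'item_code': 'SKU-001'},   # column mapped to the item identifier
             num_items=20,
             scaling_factors=[{'column': 'VehicleType', 'values': ['SUV'], 'factor': 1.4}],
             restrict_items=[{'column': 'VehicleType', 'values': ['SUV', 'Sedan']}],
             exclude_items=[{'column': 'VehicleType', 'values': ['Truck']}],
         )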



   .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None)

      Return a chat response which continues the conversation based on the input messages and search results.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param messages: A list of chronologically ordered messages, starting with a user message and alternating sources. A message is a dict with attributes: is_user (bool): whether the message is from the user; text (str): the message's text.
      :type messages: list
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
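
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the token, id, and the 'department' filter column are illustrative placeholders:

      .. code-block:: python

         response = client.get_chat_response(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             messages=[
                 {'is_user': True, 'text': 'What is our refund policy?'},
                 {'is_user': False, 'text': 'Refunds are available within 30 days of purchase.'},
                 {'is_user': True, 'text': 'Does that also apply to digital goods?'},
             ],
             temperature=0.0,
             filter_key_values={'department': ['support']},
         )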



   .. py:method:: get_chat_response_with_binary_data(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, attachments = None)

      Return a chat response which continues the conversation based on the input messages and search results.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param messages: A list of chronologically ordered messages, starting with a user message and alternating sources. A message is a dict with attributes: is_user (bool): whether the message is from the user; text (str): the message's text.
      :type messages: list
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param attachments: A dictionary of binary data to use to answer the queries.
      :type attachments: dict



   .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)

      Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).

      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param message: A message from the user
      :type message: str
      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user-supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation id.
      :type external_session_id: str
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param doc_infos: An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from the docstore.
      :type doc_infos: list
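
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the token and id are placeholders, and reading ``deployment_conversation_id`` off the first response is an assumption about the returned object:

      .. code-block:: python

         # First turn: no conversation id, so a new conversation is created.
         first = client.get_conversation_response(
             deployment_id='YOUR_DEPLOYMENT_ID',
             message='Summarize the Q3 report.',
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
         )

         # Follow-up turn: continue the same conversation.
         follow_up = client.get_conversation_response(
             deployment_id='YOUR_DEPLOYMENT_ID',
             message='Now list the three biggest risks.',
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_conversation_id=first.deployment_conversation_id,
         )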



   .. py:method:: get_conversation_response_with_binary_data(deployment_id, deployment_token, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, attachments = None)

      Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).

      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param message: A message from the user
      :type message: str
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user-supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation id.
      :type external_session_id: str
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param attachments: A dictionary of binary data to use to answer the queries.
      :type attachments: dict



   .. py:method:: get_search_results(deployment_token, deployment_id, query_data, num = 15)

      Return the most relevant search results to the search query from the uploaded documents.

      :param deployment_token: A token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be securely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the key is "Content" and the value is the search query text.
      :type query_data: dict
      :param num: Number of search results to return.
      :type num: int
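
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class and the token and id are placeholders:

      .. code-block:: python

         results = client.get_search_results(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             query_data={'Content': 'warranty terms for enterprise customers'},
             num=5,
         )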



   .. py:method:: get_sentiment(deployment_token, deployment_id, document)

      Predicts sentiment on a document

      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for a deployment created under this project.
      :type deployment_id: str
      :param document: The document to be analyzed for sentiment.
      :type document: str



   .. py:method:: get_entailment(deployment_token, deployment_id, document)

      Predicts the classification of the document

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param document: The document to be classified.
      :type document: str



   .. py:method:: get_classification(deployment_token, deployment_id, document)

      Predicts the classification of the document

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param document: The document to be classified.
      :type document: str



   .. py:method:: get_summary(deployment_token, deployment_id, query_data)

      Returns a JSON of the predicted summary for the given document. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'text' mapped to mapping 'DOCUMENT' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: Raw data dictionary containing the required document data - must have a key 'document' corresponding to a DOCUMENT type text as value.
      :type query_data: dict



   .. py:method:: predict_language(deployment_token, deployment_id, query_data)

      Predicts the language of the text

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments within this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: The input string to detect.
      :type query_data: str



   .. py:method:: get_assignments(deployment_token, deployment_id, query_data, forced_assignments = None, solve_time_limit_seconds = None, include_all_assignments = False)

      Get all positive assignments that match a query.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly; 2. A list of values, which matches any element in the list; 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range.
      :type query_data: dict
      :param forced_assignments: Set of assignments to force and resolve before returning query results.
      :type forced_assignments: dict
      :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
      :type solve_time_limit_seconds: float
      :param include_all_assignments: If True, will return all assignments, including assignments with value 0. Default is False.
      :type include_all_assignments: bool
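
      A minimal usage sketch showing the three accepted value forms for ``query_data``, assuming ``client`` is an authenticated instance of this class; the column names ('driver_id', 'region', 'shift_start') are illustrative placeholders:

      .. code-block:: python

         assignments = client.get_assignments(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             query_data={
                 'driver_id': 'd_42',                             # scalar: matched exactly
                 'region': ['north', 'east'],                     # list: matches any element
                 'shift_start': {'lower_in': 8, 'upper_ex': 12},  # range: inclusive lower, exclusive upper
             },
             solve_time_limit_seconds=30.0,
         )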



   .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None, best_alternate_only = False)

      Get alternative positive assignments for given query. Optimal assignments are ignored and the alternative assignments are returned instead.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: Specifies the set of assignments being requested. The value for the key can be: 1. A simple scalar value, which is matched exactly; 2. A list of values, which matches any element in the list; 3. A dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range.
      :type query_data: dict
      :param add_constraints: List of constraint dicts to apply to the query. Each constraint dict should have the following keys: 1. query (dict): Specifies the set of assignment variables involved in the constraint; the format is the same as query_data. 2. operator (str): Constraint operator '=' or '<=' or '>='. 3. constant (int): Constraint RHS constant value. 4. coefficient_column (str): Column in the Assignment feature group to be used as the coefficient for the assignment variables; optional, defaults to 1.
      :type add_constraints: list
      :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
      :type solve_time_limit_seconds: float
      :param best_alternate_only: When True, only the best alternate will be returned; when False, multiple alternates are returned.
      :type best_alternate_only: bool
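
      A minimal usage sketch of the ``add_constraints`` format, assuming ``client`` is an authenticated instance of this class; the column names and values are illustrative placeholders:

      .. code-block:: python

         alternates = client.get_alternative_assignments(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             query_data={'driver_id': 'd_42'},
             add_constraints=[{
                 'query': {'region': ['north']},  # assignment variables the constraint covers
                 'operator': '<=',
                 'constant': 3,
                 # 'coefficient_column' omitted: each variable's coefficient defaults to 1
             }],
             best_alternate_only=True,
         )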



   .. py:method:: get_assignments_online_with_new_serialized_inputs(deployment_token, deployment_id, query_data = None, solve_time_limit_seconds = None)

      Get assignments for given query, with new inputs

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary with assignment, constraint and constraint_equations_df.
      :type query_data: dict
      :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
      :type solve_time_limit_seconds: float



   .. py:method:: check_constraints(deployment_token, deployment_id, query_data)

      Check for any constraints violated by the overrides.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: Assignment overrides to the solution.
      :type query_data: dict



   .. py:method:: predict_with_binary_data(deployment_token, deployment_id, blob)

      Make predictions for a given blob, e.g. image, audio

      :param deployment_token: A token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param blob: The multipart/form-data of the data.
      :type blob: io.TextIOBase



   .. py:method:: describe_image(deployment_token, deployment_id, image, categories, top_n = None)

      Describe the similarity between an image and a list of categories.

      :param deployment_token: Authentication token to access created deployments. This token is only authorized to predict on deployments in the current project, and can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: Unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param image: Image to describe.
      :type image: io.TextIOBase
      :param categories: List of candidate categories to compare with the image.
      :type categories: list
      :param top_n: Return the N most similar categories.
      :type top_n: int



   .. py:method:: get_text_from_document(deployment_token, deployment_id, document = None, adjust_doc_orientation = False, save_predicted_pdf = False, save_extracted_features = False)

      Generate text from a document

      :param deployment_token: Authentication token to access created deployments. This token is only authorized to predict on deployments in the current project, and can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: Unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param document: Input document which can be an image, pdf, or word document (Some formats might not be supported yet)
      :type document: io.TextIOBase
      :param adjust_doc_orientation: (Optional) Whether to detect the document page orientation and rotate it if needed.
      :type adjust_doc_orientation: bool
      :param save_predicted_pdf: (Optional) If True, will save the predicted pdf bytes so that they can be fetched using the prediction docId. Default is False.
      :type save_predicted_pdf: bool
      :param save_extracted_features: (Optional) If True, will save extracted features (i.e. page tokens) so that they can be fetched using the prediction docId. Default is False.
      :type save_extracted_features: bool
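
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the file name, token, and id are placeholders, and the document is passed as an open file handle:

      .. code-block:: python

         with open('invoice.pdf', 'rb') as document:
             extraction = client.get_text_from_document(
                 deployment_token='YOUR_DEPLOYMENT_TOKEN',
                 deployment_id='YOUR_DEPLOYMENT_ID',
                 document=document,
                 adjust_doc_orientation=True,
                 save_predicted_pdf=True,
             )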



   .. py:method:: transcribe_audio(deployment_token, deployment_id, audio)

      Transcribe the audio

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to make predictions on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param audio: The audio to transcribe.
      :type audio: io.TextIOBase



   .. py:method:: classify_image(deployment_token, deployment_id, image = None, doc_id = None)

      Classify an image.

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier to a deployment created under the project.
      :type deployment_id: str
      :param image: The binary data of the image to classify. One of image or doc_id must be specified.
      :type image: io.TextIOBase
      :param doc_id: The document ID of the image. One of image or doc_id must be specified.
      :type doc_id: str



   .. py:method:: classify_pdf(deployment_token, deployment_id, pdf = None)

      Returns a classification prediction from a PDF

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param pdf: (Optional) The pdf to predict on. One of pdf or docId must be specified.
      :type pdf: io.TextIOBase



   .. py:method:: get_cluster(deployment_token, deployment_id, query_data)

      Predicts the cluster for given data.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where each 'key' represents a column name and its corresponding 'value' represents the value of that column. For Timeseries Clustering, the 'key' should be ITEM_ID, and its value should represent a unique item ID that needs clustering.
      :type query_data: dict



   .. py:method:: get_objects_from_image(deployment_token, deployment_id, image)

      Detect objects in an image.

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier to a deployment created under the project.
      :type deployment_id: str
      :param image: The binary data of the image to detect objects from.
      :type image: io.TextIOBase



   .. py:method:: score_image(deployment_token, deployment_id, image)

      Score an image.

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier to a deployment created under the project.
      :type deployment_id: str
      :param image: The binary data of the image to get the score.
      :type image: io.TextIOBase



   .. py:method:: transfer_style(deployment_token, deployment_id, source_image, style_image)

      Change the source image to adopt the visual style from the style image.

      :param deployment_token: A token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param source_image: The source image to which the style will be applied.
      :type source_image: io.TextIOBase
      :param style_image: The image that has the style as a reference.
      :type style_image: io.TextIOBase



   .. py:method:: generate_image(deployment_token, deployment_id, query_data)

      Generate an image from text prompt.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: Specifies the text prompt. For example, {'prompt': 'a cat'}
      :type query_data: dict



   .. py:method:: execute_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None)

      Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
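
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the parameter names inside ``keyword_arguments`` are defined by your agent's execute function and are illustrative here:

      .. code-block:: python

         result = client.execute_agent(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             keyword_arguments={
                 'ticket_text': 'Customer cannot log in after a password reset.',
                 'priority': 'high',
             },
         )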



   .. py:method:: get_matrix_agent_schema(deployment_token, deployment_id, query, doc_infos = None, deployment_conversation_id = None, external_session_id = None)

      Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param query: User input query to initialize the matrix computation.
      :type query: str
      :param doc_infos: An optional list of documents used for constructing the matrix. A keyword 'doc_id' is expected to be present in each document for retrieving contents from the docstore.
      :type doc_infos: list
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str
      :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
      :type external_session_id: str



   .. py:method:: execute_conversation_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, regenerate = False, doc_infos = None, agent_workflow_node_id = None)

      Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str
      :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
      :type external_session_id: str
      :param regenerate: If True, will regenerate the response from the last query.
      :type regenerate: bool
      :param doc_infos: An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from the docstore.
      :type doc_infos: list
      :param agent_workflow_node_id: An optional agent workflow node id to trigger agent execution from an intermediate node.
      :type agent_workflow_node_id: str



   .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None, filter_clause = None, crowding_limits = None, include_text_search = False)

      Looks up the deployed document retriever and returns the documents matching the given query.

      Original documents are split into chunks and stored in the document retriever. This lookup function returns the relevant chunks
      from the document retriever. The returned chunks can be expanded to include more words from the original documents and merged if they
      overlap, where permitted by the settings provided. The returned chunks are sorted by relevance.


      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments within this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param data: The query to search for.
      :type data: str
      :param filters: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filters: dict
      :param num: If provided, will limit the number of results to the value specified.
      :type num: int
      :param result_columns: If provided, will limit the column properties present in each result to those specified in this list.
      :type result_columns: list
      :param max_words: If provided, will limit the total number of words in the results to the value specified.
      :type max_words: int
      :param num_retrieval_margin_words: If provided, will add this number of words from left and right of the returned chunks.
      :type num_retrieval_margin_words: int
      :param max_words_per_chunk: If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual chunk size on disk, which is determined during document retriever creation, the actual chunk size will be used; i.e., chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
      :type max_words_per_chunk: int
      :param score_multiplier_column: If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
      :type score_multiplier_column: str
      :param min_score: If provided, will filter out the results with score less than the value specified.
      :type min_score: float
      :param required_phrases: If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace and case insensitive.
      :type required_phrases: list
      :param filter_clause: If provided, filter the results of the query using this sql where clause.
      :type filter_clause: str
      :param crowding_limits: A dictionary mapping metadata columns to the maximum number of results per unique value of the column. This is used to ensure diversity of metadata attribute values in the results. If a particular attribute value has already reached its maximum count, further results with that same attribute value will be excluded from the final result set. An entry in the map can also be a map specifying the limit per attribute value rather than a single limit for all values. This allows a per value limit for attributes. If an attribute value is not present in the map its limit defaults to zero.
      :type crowding_limits: dict
      :param include_text_search: If true, combine the ranking of results from a BM25 text search over the documents with the vector search using reciprocal rank fusion. It leverages both lexical and semantic matching for better overall results. It's particularly valuable in professional, technical, or specialized fields where both precision in terminology and understanding of context are important.
      :type include_text_search: bool

      :returns: The relevant documentation results found from the document retriever.
      :rtype: list[DocumentRetrieverLookupResult]
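
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the token, id, and the 'doc_type' metadata column with its values are illustrative placeholders:

      .. code-block:: python

         matches = client.lookup_matches(
             deployment_token='YOUR_DEPLOYMENT_TOKEN',
             deployment_id='YOUR_DEPLOYMENT_ID',
             data='data retention requirements for EU customers',
             filters={'doc_type': ['policy', 'contract']},
             num=10,
             num_retrieval_margin_words=20,
             crowding_limits={'doc_type': 3},   # at most 3 results per doc_type value
             include_text_search=True,
         )
         for match in matches:
             print(match)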



   .. py:method:: get_completion(deployment_token, deployment_id, prompt)

      Returns the finetuned LLM generated completion of the prompt.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param prompt: The prompt given to the finetuned LLM to generate the completion.
      :type prompt: str



   .. py:method:: execute_agent_with_binary_data(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, blobs = None)

      Executes a deployed AI agent function with binary data as inputs.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str
      :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
      :type external_session_id: str
      :param blobs: A dictionary of binary data to use as inputs to the agent execute function.
      :type blobs: dict

      :returns: The result of the agent execution
      :rtype: AgentDataExecutionResult



   .. py:method:: start_autonomous_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, save_conversations = True)

      Starts a deployed Autonomous agent associated with the given deployment_conversation_id, using the arguments and keyword arguments as inputs for the execute function of the trigger node.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
      :param save_conversations: If true, a new conversation will be created for every run of the workflow associated with the agent.
      :type save_conversations: bool



   .. py:method:: pause_autonomous_agent(deployment_token, deployment_id, deployment_conversation_id)

      Pauses a deployed Autonomous agent associated with the given deployment_conversation_id.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str



   .. py:method:: create_batch_prediction(deployment_id, table_name = None, name = None, global_prediction_args = None, batch_prediction_args = None, explanations = False, output_format = None, output_location = None, database_connector_id = None, database_output_config = None, refresh_schedule = None, csv_input_prefix = None, csv_prediction_prefix = None, csv_explanations_prefix = None, output_includes_metadata = None, result_input_columns = None, input_feature_groups = None)

      Creates a batch prediction job description for the given deployment.

      :param deployment_id: Unique string identifier for the deployment.
      :type deployment_id: str
      :param table_name: Name of the feature group table to write the results of the batch prediction. Can only be specified if outputLocation and databaseConnectorId are not specified. If tableName is specified, the outputType will be enforced as CSV.
      :type table_name: str
      :param name: Name of the batch prediction job.
      :type name: str
      :param batch_prediction_args: Batch Prediction args specific to problem type.
      :type batch_prediction_args: BatchPredictionArgs
      :param output_format: Format of the batch prediction output (CSV or JSON).
      :type output_format: str
      :param output_location: Location to write the prediction results. Otherwise, results will be stored in Abacus.AI.
      :type output_location: str
      :param database_connector_id: Unique identifier of a Database Connection to write predictions to. Cannot be specified in conjunction with outputLocation.
      :type database_connector_id: str
      :param database_output_config: Key-value pair of columns/values to write to the database connector. Only available if databaseConnectorId is specified.
      :type database_output_config: dict
      :param refresh_schedule: Cron-style string that describes a schedule in UTC to automatically run the batch prediction.
      :type refresh_schedule: str
      :param csv_input_prefix: Prefix to prepend to the input columns, only applies when output format is CSV.
      :type csv_input_prefix: str
      :param csv_prediction_prefix: Prefix to prepend to the prediction columns, only applies when output format is CSV.
      :type csv_prediction_prefix: str
      :param csv_explanations_prefix: Prefix to prepend to the explanation columns, only applies when output format is CSV.
      :type csv_explanations_prefix: str
      :param output_includes_metadata: If true, output will contain columns including prediction start time, batch prediction version, and model version.
      :type output_includes_metadata: bool
      :param result_input_columns: If present, will limit result files or feature groups to only include columns present in this list.
      :type result_input_columns: list
      :param input_feature_groups: A dict of {'<feature_group_type>': '<feature_group_id>'} which overrides the default input data of that type for the Batch Prediction. Default input data is the training data that was used for training the deployed model.
      :type input_feature_groups: dict

      :returns: The batch prediction description.
      :rtype: BatchPrediction
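
      A minimal usage sketch, assuming ``client`` is an authenticated instance of this class; the table name, job name, and schedule are placeholders, and reading ``batch_prediction_id`` off the returned object is an assumption about its attributes:

      .. code-block:: python

         batch_prediction = client.create_batch_prediction(
             deployment_id='YOUR_DEPLOYMENT_ID',
             table_name='churn_predictions',
             name='weekly-churn-scoring',
             refresh_schedule='0 6 * * 1',   # every Monday at 06:00 UTC
             output_includes_metadata=True,
         )
         # Kick off a run of the job described above (see start_batch_prediction below).
         version = client.start_batch_prediction(batch_prediction.batch_prediction_id)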



   .. py:method:: start_batch_prediction(batch_prediction_id)

      Creates a new batch prediction version job for a given batch prediction job description.

      :param batch_prediction_id: The unique identifier of the batch prediction to create a new version of.
      :type batch_prediction_id: str

      :returns: The batch prediction version started by this method call.
      :rtype: BatchPredictionVersion



   .. py:method:: update_batch_prediction(batch_prediction_id, deployment_id = None, global_prediction_args = None, batch_prediction_args = None, explanations = None, output_format = None, csv_input_prefix = None, csv_prediction_prefix = None, csv_explanations_prefix = None, output_includes_metadata = None, result_input_columns = None, name = None)

      Update a batch prediction job description.

      :param batch_prediction_id: Unique identifier of the batch prediction.
      :type batch_prediction_id: str
      :param deployment_id: Unique identifier of the deployment.
      :type deployment_id: str
      :param batch_prediction_args: Batch Prediction args specific to problem type.
      :type batch_prediction_args: BatchPredictionArgs
      :param output_format: If specified, sets the format of the batch prediction output (CSV or JSON).
      :type output_format: str
      :param csv_input_prefix: Prefix to prepend to the input columns, only applies when output format is CSV.
      :type csv_input_prefix: str
      :param csv_prediction_prefix: Prefix to prepend to the prediction columns, only applies when output format is CSV.
      :type csv_prediction_prefix: str
      :param csv_explanations_prefix: Prefix to prepend to the explanation columns, only applies when output format is CSV.
      :type csv_explanations_prefix: str
      :param output_includes_metadata: If True, output will contain columns including prediction start time, batch prediction version, and model version.
      :type output_includes_metadata: bool
      :param result_input_columns: If present, will limit result files or feature groups to only include columns present in this list.
      :type result_input_columns: list
      :param name: If present, will rename the batch prediction.
      :type name: str

      :returns: The batch prediction.
      :rtype: BatchPrediction



   .. py:method:: set_batch_prediction_file_connector_output(batch_prediction_id, output_format = None, output_location = None)

      Updates the file connector output configuration of the batch prediction

      :param batch_prediction_id: The unique identifier of the batch prediction.
      :type batch_prediction_id: str
      :param output_format: The format of the batch prediction output (CSV or JSON). If not specified, the default format will be used.
      :type output_format: str
      :param output_location: The location to write the prediction results. If not specified, results will be stored in Abacus.AI.
      :type output_location: str

      :returns: The batch prediction description.
      :rtype: BatchPrediction
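
      A minimal sketch, assuming a verified file connector; the bucket path and ID are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         # Write prediction results as CSV to a file connector location
         client.set_batch_prediction_file_connector_output(
             batch_prediction_id='YOUR_BATCH_PREDICTION_ID',
             output_format='CSV',
             output_location='s3://your-bucket/batch-prediction-output/',
         )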



   .. py:method:: set_batch_prediction_database_connector_output(batch_prediction_id, database_connector_id = None, database_output_config = None)

      Updates the database connector output configuration of the batch prediction

      :param batch_prediction_id: Unique string identifier of the batch prediction.
      :type batch_prediction_id: str
      :param database_connector_id: Unique string identifier of a Database Connector to write predictions to.
      :type database_connector_id: str
      :param database_output_config: Key-value pair of columns/values to write to the database connector.
      :type database_output_config: dict

      :returns: Description of the batch prediction.
      :rtype: BatchPrediction



   .. py:method:: set_batch_prediction_feature_group_output(batch_prediction_id, table_name)

      Creates a feature group and sets it as the batch prediction output.

      :param batch_prediction_id: Unique string identifier of the batch prediction.
      :type batch_prediction_id: str
      :param table_name: Name of the feature group table to create.
      :type table_name: str

      :returns: Batch prediction after the output has been applied.
      :rtype: BatchPrediction



   .. py:method:: set_batch_prediction_output_to_console(batch_prediction_id)

      Sets the batch prediction output to the console, clearing both the file connector and database connector configurations.

      :param batch_prediction_id: The unique identifier of the batch prediction.
      :type batch_prediction_id: str

      :returns: The batch prediction description.
      :rtype: BatchPrediction



   .. py:method:: set_batch_prediction_feature_group(batch_prediction_id, feature_group_type, feature_group_id = None)

      Sets the batch prediction input feature group.

      :param batch_prediction_id: Unique identifier of the batch prediction.
      :type batch_prediction_id: str
      :param feature_group_type: Enum string representing the feature group type to set. The type is based on the use case under which the feature group is being created (e.g. Catalog Attributes for personalized recommendation use case).
      :type feature_group_type: str
      :param feature_group_id: Unique identifier of the feature group to set as input to the batch prediction.
      :type feature_group_id: str

      :returns: Description of the batch prediction.
      :rtype: BatchPrediction



   .. py:method:: set_batch_prediction_dataset_remap(batch_prediction_id, dataset_id_remap)

      For the purposes of this batch prediction, swaps out datasets in the training feature groups.

      :param batch_prediction_id: Unique string identifier of the batch prediction.
      :type batch_prediction_id: str
      :param dataset_id_remap: Key/value pairs of dataset ids to be replaced during the batch prediction.
      :type dataset_id_remap: dict

      :returns: Batch prediction object.
      :rtype: BatchPrediction
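
      A minimal sketch; both dataset IDs are hypothetical placeholders:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         # Substitute a replacement dataset for the original training dataset,
         # only for the scope of this batch prediction
         client.set_batch_prediction_dataset_remap(
             batch_prediction_id='YOUR_BATCH_PREDICTION_ID',
             dataset_id_remap={'ORIGINAL_DATASET_ID': 'REPLACEMENT_DATASET_ID'},
         )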



   .. py:method:: delete_batch_prediction(batch_prediction_id)

      Deletes a batch prediction and associated data, such as associated monitors.

      :param batch_prediction_id: Unique string identifier of the batch prediction.
      :type batch_prediction_id: str



   .. py:method:: upsert_item_embeddings(streaming_token, model_id, item_id, vector, catalog_id = None)

      Upserts an embedding vector for an item ID for a given model ID.

      :param streaming_token: The streaming token for authenticating requests to the model.
      :type streaming_token: str
      :param model_id: A unique string identifier for the model to upsert item embeddings to.
      :type model_id: str
      :param item_id: The item id for which its embeddings will be upserted.
      :type item_id: str
      :param vector: The embedding vector.
      :type vector: list
      :param catalog_id: The name of the catalog in the model to update.
      :type catalog_id: str



   .. py:method:: delete_item_embeddings(streaming_token, model_id, item_ids, catalog_id = None)

      Deletes KNN embeddings for a list of item IDs for a given model ID.

      :param streaming_token: The streaming token for authenticating requests to the model.
      :type streaming_token: str
      :param model_id: A unique string identifier for the model from which to delete item embeddings.
      :type model_id: str
      :param item_ids: A list of item IDs whose embeddings will be deleted.
      :type item_ids: list
      :param catalog_id: An optional name to specify which catalog in a model to update.
      :type catalog_id: str



   .. py:method:: upsert_multiple_item_embeddings(streaming_token, model_id, upserts, catalog_id = None)

      Upserts KNN embeddings for multiple item IDs for a given model ID.

      :param streaming_token: The streaming token for authenticating requests to the model.
      :type streaming_token: str
      :param model_id: The unique string identifier of the model to upsert item embeddings to.
      :type model_id: str
      :param upserts: A list of dictionaries of the form {'itemId': ..., 'vector': [...]} for each upsert.
      :type upserts: list
      :param catalog_id: Name of the catalog in the model to update.
      :type catalog_id: str
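
      A minimal sketch using the ``{'itemId': ..., 'vector': [...]}`` upsert format described above; the token, model ID, and vectors are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         client.upsert_multiple_item_embeddings(
             streaming_token='YOUR_STREAMING_TOKEN',
             model_id='YOUR_MODEL_ID',
             upserts=[
                 {'itemId': 'item_1', 'vector': [0.12, 0.48, 0.33]},
                 {'itemId': 'item_2', 'vector': [0.91, 0.05, 0.27]},
             ],
         )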



   .. py:method:: append_data(feature_group_id, streaming_token, data)

      Appends new data into the feature group for a given lookup key recordId.

      :param feature_group_id: Unique string identifier for the streaming feature group to record data to.
      :type feature_group_id: str
      :param streaming_token: The streaming token for authenticating requests.
      :type streaming_token: str
      :param data: The data to record as a JSON object.
      :type data: dict



   .. py:method:: append_multiple_data(feature_group_id, streaming_token, data)

      Appends new data into the feature group for a given lookup key recordId.

      :param feature_group_id: Unique string identifier of the streaming feature group to record data to.
      :type feature_group_id: str
      :param streaming_token: Streaming token for authenticating requests.
      :type streaming_token: str
      :param data: Data to record, as a list of JSON objects.
      :type data: list



   .. py:method:: upsert_data(feature_group_id, data, streaming_token = None, blobs = None)

      Updates data in the feature group for a given lookup key record ID if the record ID is found; otherwise, inserts a new row into the feature group.

      :param feature_group_id: A unique string identifier of the online feature group to record data to.
      :type feature_group_id: str
      :param data: The data to record, in JSON format.
      :type data: dict
      :param streaming_token: Optional streaming token for authenticating requests if upserting to streaming FG.
      :type streaming_token: str
      :param blobs: A dictionary of binary data used to populate file fields in the data being upserted to the streaming feature group.
      :type blobs: dict

      :returns: The feature group row that was upserted.
      :rtype: FeatureGroupRow
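
      A minimal sketch; the feature group ID and the record columns are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         # Insert the row if the lookup key is new, otherwise update the existing row
         row = client.upsert_data(
             feature_group_id='YOUR_FEATURE_GROUP_ID',
             data={'record_id': 'user_42', 'country': 'US', 'age': 31},
         )
         print(row)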



   .. py:method:: delete_data(feature_group_id, primary_key)

      Deletes a row from the feature group given the primary key

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param primary_key: The primary key value for which to delete the feature group row
      :type primary_key: str



   .. py:method:: describe_feature_group_row_process_by_key(deployment_id, primary_key_value)

      Gets the feature group row process.

      :param deployment_id: The deployment id
      :type deployment_id: str
      :param primary_key_value: The primary key value
      :type primary_key_value: str

      :returns: An object representing the feature group row process
      :rtype: FeatureGroupRowProcess



   .. py:method:: list_feature_group_row_processes(deployment_id, limit = None, status = None)

      Gets a list of feature group row processes.

      :param deployment_id: The deployment id for the process
      :type deployment_id: str
      :param limit: The maximum number of processes to return. Defaults to None.
      :type limit: int
      :param status: The status of the processes to return. Defaults to None.
      :type status: str

      :returns: A list of objects representing the feature group row processes.
      :rtype: list[FeatureGroupRowProcess]
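
      A minimal sketch; the deployment ID is hypothetical and the status filter value is an assumption:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         processes = client.list_feature_group_row_processes(
             deployment_id='YOUR_DEPLOYMENT_ID',
             limit=20,
             status='FAILED',  # hypothetical status filter value
         )
         for process in processes:
             print(process)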



   .. py:method:: get_feature_group_row_process_summary(deployment_id)

      Gets a summary of the statuses of the individual feature group processes.

      :param deployment_id: The deployment id for the process
      :type deployment_id: str

      :returns: An object representing the summary of the statuses of the individual feature group processes
      :rtype: FeatureGroupRowProcessSummary



   .. py:method:: reset_feature_group_row_process_by_key(deployment_id, primary_key_value)

      Resets a feature group row process so that it can be reprocessed

      :param deployment_id: The deployment id
      :type deployment_id: str
      :param primary_key_value: The primary key value
      :type primary_key_value: str

      :returns: An object representing the feature group row process.
      :rtype: FeatureGroupRowProcess



   .. py:method:: get_feature_group_row_process_logs_by_key(deployment_id, primary_key_value)

      Gets the logs for a feature group row process

      :param deployment_id: The deployment id
      :type deployment_id: str
      :param primary_key_value: The primary key value
      :type primary_key_value: str

      :returns: An object representing the logs for the feature group row process
      :rtype: FeatureGroupRowProcessLogs



   .. py:method:: create_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, function_type = 'FEATURE_GROUP', description = None, examples = None, user_level_connectors = None, org_level_connectors = None, output_variable_mappings = None)

      Creates a custom Python function that is reusable.

      :param name: The name to identify the Python function. Must be a valid Python identifier.
      :type name: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param function_name: The name of the Python function.
      :type function_name: str
      :param function_variable_mappings: List of Python function arguments.
      :type function_variable_mappings: List
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param function_type: Type of Python function to create. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
      :type function_type: str
      :param description: Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
      :type description: str
      :param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
      :type examples: dict
      :param user_level_connectors: Dictionary containing user level connectors.
      :type user_level_connectors: Dict
      :param org_level_connectors: List containing organization level connectors.
      :type org_level_connectors: List
      :param output_variable_mappings: List of output variable mappings that defines the elements of the function's return value.
      :type output_variable_mappings: List

      :returns: The Python function that can be used (e.g. for feature group transform).
      :rtype: PythonFunction
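
      A minimal sketch registering a simple feature group transform; the function body and package pin are illustrative only:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         source = '''
         import pandas as pd

         def add_ratio_column(df):
             # hypothetical transform: derive a ratio column from two existing columns
             df['ratio'] = df['numerator'] / df['denominator']
             return df
         '''

         python_function = client.create_python_function(
             name='add_ratio_column',
             source_code=source,
             function_name='add_ratio_column',
             package_requirements=['pandas>=1.4.0'],
         )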



   .. py:method:: update_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, description = None, examples = None, user_level_connectors = None, org_level_connectors = None, output_variable_mappings = None)

      Update custom python function with user inputs for the given python function.

      :param name: The name to identify the Python function. Must be a valid Python identifier.
      :type name: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param function_name: The name of the Python function within `source_code`.
      :type function_name: str
      :param function_variable_mappings: List of arguments required by `function_name`.
      :type function_variable_mappings: List
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param description: Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
      :type description: str
      :param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
      :type examples: dict
      :param user_level_connectors: Dictionary containing user level connectors.
      :type user_level_connectors: Dict
      :param org_level_connectors: List of organization level connectors.
      :type org_level_connectors: List
      :param output_variable_mappings: List of output variable mappings that defines the elements of the function's return value.
      :type output_variable_mappings: List

      :returns: The Python function object.
      :rtype: PythonFunction



   .. py:method:: delete_python_function(name)

      Removes an existing Python function.

      :param name: The name to identify the Python function. Must be a valid Python identifier.
      :type name: str



   .. py:method:: create_pipeline(pipeline_name, project_id = None, cron = None, is_prod = None)

      Creates a pipeline for executing multiple steps.

      :param pipeline_name: The name of the pipeline, which should be unique to the organization.
      :type pipeline_name: str
      :param project_id: A unique string identifier for the project that the pipeline belongs to.
      :type project_id: str
      :param cron: A cron-like string specifying the frequency of pipeline reruns.
      :type cron: str
      :param is_prod: Whether the pipeline is a production pipeline or not.
      :type is_prod: bool

      :returns: An object that describes a Pipeline.
      :rtype: Pipeline
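
      A minimal sketch; the project ID and cron schedule are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         pipeline = client.create_pipeline(
             pipeline_name='daily_etl_pipeline',   # must be unique within the organization
             project_id='YOUR_PROJECT_ID',
             cron='0 6 * * *',                     # hypothetical daily schedule
         )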



   .. py:method:: describe_pipeline(pipeline_id)

      Describes a given pipeline.

      :param pipeline_id: The ID of the pipeline to describe.
      :type pipeline_id: str

      :returns: An object describing a Pipeline
      :rtype: Pipeline



   .. py:method:: describe_pipeline_by_name(pipeline_name)

      Describes a given pipeline.

      :param pipeline_name: The name of the pipeline to describe.
      :type pipeline_name: str

      :returns: An object describing a Pipeline
      :rtype: Pipeline



   .. py:method:: update_pipeline(pipeline_id, project_id = None, pipeline_variable_mappings = None, cron = None, is_prod = None)

      Updates a pipeline for executing multiple steps.

      :param pipeline_id: The ID of the pipeline to update.
      :type pipeline_id: str
      :param project_id: A unique string identifier for the project that the pipeline belongs to.
      :type project_id: str
      :param pipeline_variable_mappings: List of Python function arguments for the pipeline.
      :type pipeline_variable_mappings: List
      :param cron: A cron-like string specifying the frequency of the scheduled pipeline runs.
      :type cron: str
      :param is_prod: Whether the pipeline is a production pipeline or not.
      :type is_prod: bool

      :returns: An object that describes a Pipeline.
      :rtype: Pipeline



   .. py:method:: rename_pipeline(pipeline_id, pipeline_name)

      Renames a pipeline.

      :param pipeline_id: The ID of the pipeline to rename.
      :type pipeline_id: str
      :param pipeline_name: The new name of the pipeline.
      :type pipeline_name: str

      :returns: An object that describes a Pipeline.
      :rtype: Pipeline



   .. py:method:: delete_pipeline(pipeline_id)

      Deletes a pipeline.

      :param pipeline_id: The ID of the pipeline to delete.
      :type pipeline_id: str



   .. py:method:: list_pipeline_versions(pipeline_id, limit = 200)

      Lists the pipeline versions for a specified pipeline

      :param pipeline_id: The ID of the pipeline to list versions for.
      :type pipeline_id: str
      :param limit: The maximum number of pipeline versions to return.
      :type limit: int

      :returns: A list of pipeline versions.
      :rtype: list[PipelineVersion]



   .. py:method:: run_pipeline(pipeline_id, pipeline_variable_mappings = None)

      Runs a specified pipeline with the arguments provided.

      :param pipeline_id: The ID of the pipeline to run.
      :type pipeline_id: str
      :param pipeline_variable_mappings: List of Python function arguments for the pipeline.
      :type pipeline_variable_mappings: List

      :returns: The object describing the pipeline
      :rtype: PipelineVersion
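
      A minimal sketch; the pipeline ID is a placeholder and the variable mappings are omitted (they default to ``None``):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         pipeline_version = client.run_pipeline(pipeline_id='YOUR_PIPELINE_ID')
         print(pipeline_version)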



   .. py:method:: reset_pipeline_version(pipeline_version, steps = None, include_downstream_steps = True)

      Reruns a pipeline version for the given steps and downstream steps if specified.

      :param pipeline_version: The id of the pipeline version.
      :type pipeline_version: str
      :param steps: List of pipeline step names to rerun.
      :type steps: list
      :param include_downstream_steps: Whether to rerun downstream steps from the steps you have passed
      :type include_downstream_steps: bool

      :returns: Object describing the pipeline version
      :rtype: PipelineVersion



   .. py:method:: create_pipeline_step(pipeline_id, step_name, function_name = None, source_code = None, step_input_mappings = None, output_variable_mappings = None, step_dependencies = None, package_requirements = None, cpu_size = None, memory = None, timeout = None)

      Creates a step in a given pipeline.

      :param pipeline_id: The ID of the pipeline to run.
      :type pipeline_id: str
      :param step_name: The name of the step.
      :type step_name: str
      :param function_name: The name of the Python function.
      :type function_name: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param step_input_mappings: List of Python function arguments.
      :type step_input_mappings: List
      :param output_variable_mappings: List of Python function outputs.
      :type output_variable_mappings: List
      :param step_dependencies: List of step names this step depends on.
      :type step_dependencies: list
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param cpu_size: Size of the CPU for the step function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the step function.
      :type memory: int
      :param timeout: Timeout for the step in minutes, default is 300 minutes.
      :type timeout: int

      :returns: Object describing the pipeline.
      :rtype: Pipeline
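
      A minimal sketch adding a single step; the step source, memory, and timeout values are illustrative assumptions:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         step_source = '''
         def transform_step():
             # hypothetical step body
             return 'done'
         '''

         pipeline = client.create_pipeline_step(
             pipeline_id='YOUR_PIPELINE_ID',
             step_name='transform_step',
             function_name='transform_step',
             source_code=step_source,
             memory=16,    # GB
             timeout=60,   # minutes
         )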



   .. py:method:: delete_pipeline_step(pipeline_step_id)

      Deletes a step from a pipeline.

      :param pipeline_step_id: The ID of the pipeline step.
      :type pipeline_step_id: str



   .. py:method:: update_pipeline_step(pipeline_step_id, function_name = None, source_code = None, step_input_mappings = None, output_variable_mappings = None, step_dependencies = None, package_requirements = None, cpu_size = None, memory = None, timeout = None)

      Updates a step in a given pipeline.

      :param pipeline_step_id: The ID of the pipeline_step to update.
      :type pipeline_step_id: str
      :param function_name: The name of the Python function.
      :type function_name: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param step_input_mappings: List of Python function arguments.
      :type step_input_mappings: List
      :param output_variable_mappings: List of Python function outputs.
      :type output_variable_mappings: List
      :param step_dependencies: List of step names this step depends on.
      :type step_dependencies: list
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param cpu_size: Size of the CPU for the step function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the step function.
      :type memory: int
      :param timeout: Timeout for the pipeline step, default is 300 minutes.
      :type timeout: int

      :returns: Object describing the pipeline.
      :rtype: PipelineStep



   .. py:method:: rename_pipeline_step(pipeline_step_id, step_name)

      Renames a step in a given pipeline.

      :param pipeline_step_id: The ID of the pipeline step to rename.
      :type pipeline_step_id: str
      :param step_name: The name of the step.
      :type step_name: str

      :returns: Object describing the pipeline.
      :rtype: PipelineStep



   .. py:method:: unset_pipeline_refresh_schedule(pipeline_id)

      Deletes the refresh schedule for a given pipeline.

      :param pipeline_id: The id of the pipeline.
      :type pipeline_id: str

      :returns: Object describing the pipeline.
      :rtype: Pipeline



   .. py:method:: pause_pipeline_refresh_schedule(pipeline_id)

      Pauses the refresh schedule for a given pipeline.

      :param pipeline_id: The id of the pipeline.
      :type pipeline_id: str

      :returns: Object describing the pipeline.
      :rtype: Pipeline



   .. py:method:: resume_pipeline_refresh_schedule(pipeline_id)

      Resumes the refresh schedule for a given pipeline.

      :param pipeline_id: The id of the pipeline.
      :type pipeline_id: str

      :returns: Object describing the pipeline.
      :rtype: Pipeline



   .. py:method:: skip_pending_pipeline_version_steps(pipeline_version)

      Skips pending steps in a pipeline version.

      :param pipeline_version: The id of the pipeline version.
      :type pipeline_version: str

      :returns: Object describing the pipeline version
      :rtype: PipelineVersion



   .. py:method:: create_graph_dashboard(project_id, name, python_function_ids = None)

      Creates a plot dashboard from the selected Python plots.

      :param project_id: A unique string identifier for the project that the plot dashboard belongs to.
      :type project_id: str
      :param name: The name of the dashboard.
      :type name: str
      :param python_function_ids: A list of unique string identifiers for the python functions to be used in the graph dashboard.
      :type python_function_ids: List

      :returns: An object describing the graph dashboard.
      :rtype: GraphDashboard



   .. py:method:: delete_graph_dashboard(graph_dashboard_id)

      Deletes a graph dashboard

      :param graph_dashboard_id: Unique string identifier for the graph dashboard to be deleted.
      :type graph_dashboard_id: str



   .. py:method:: update_graph_dashboard(graph_dashboard_id, name = None, python_function_ids = None)

      Updates a graph dashboard

      :param graph_dashboard_id: Unique string identifier for the graph dashboard to update.
      :type graph_dashboard_id: str
      :param name: Name of the dashboard.
      :type name: str
      :param python_function_ids: List of unique string identifiers for the Python functions to be used in the graph dashboard.
      :type python_function_ids: List

      :returns: An object describing the graph dashboard.
      :rtype: GraphDashboard



   .. py:method:: add_graph_to_dashboard(python_function_id, graph_dashboard_id, function_variable_mappings = None, name = None)

      Add a python plot function to a dashboard

      :param python_function_id: Unique string identifier for the Python function.
      :type python_function_id: str
      :param graph_dashboard_id: Unique string identifier for the graph dashboard to update.
      :type graph_dashboard_id: str
      :param function_variable_mappings: List of arguments to be supplied to the function as parameters, in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
      :type function_variable_mappings: List
      :param name: Name of the added python plot
      :type name: str

      :returns: An object describing the graph dashboard.
      :rtype: GraphDashboard
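
      A minimal sketch using the variable mapping format described above; the IDs and plot name are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         dashboard = client.add_graph_to_dashboard(
             python_function_id='YOUR_PYTHON_FUNCTION_ID',
             graph_dashboard_id='YOUR_GRAPH_DASHBOARD_ID',
             function_variable_mappings=[
                 {'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'},
             ],
             name='Sales by region',  # hypothetical plot name
         )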



   .. py:method:: update_graph_to_dashboard(graph_reference_id, function_variable_mappings = None, name = None)

      Updates a Python plot function on a dashboard.

      :param graph_reference_id: A unique string identifier for the graph reference.
      :type graph_reference_id: str
      :param function_variable_mappings: A list of arguments to be supplied to the Python function as parameters in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
      :type function_variable_mappings: List
      :param name: The updated name for the graph
      :type name: str

      :returns: An object describing the graph dashboard.
      :rtype: GraphDashboard



   .. py:method:: delete_graph_from_dashboard(graph_reference_id)

      Deletes a python plot function from a dashboard

      :param graph_reference_id: Unique String Identifier for the graph
      :type graph_reference_id: str



   .. py:method:: create_algorithm(name, problem_type, source_code = None, training_data_parameter_names_mapping = None, training_config_parameter_name = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, config_options = None, is_default_enabled = False, project_id = None, use_gpu = False, package_requirements = None)

      Creates a custom algorithm that is re-usable for model training.

      :param name: The name to identify the algorithm; only uppercase letters, numbers, and underscores are allowed.
      :type name: str
      :param problem_type: The type of problem this algorithm will work on.
      :type problem_type: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the train/predict/predict_many/initialize functions. A list of allowed import and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param training_data_parameter_names_mapping: The mapping from feature group types to training data parameter names in the train function.
      :type training_data_parameter_names_mapping: dict
      :param training_config_parameter_name: The train config parameter name in the train function.
      :type training_config_parameter_name: str
      :param train_function_name: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the source code that will be executed for batch prediction of the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param initialize_function_name: Name of the function found in the source code to initialize the trained model before using it to make predictions using the model.
      :type initialize_function_name: str
      :param config_options: Map dataset types and configs to train function parameter names.
      :type config_options: dict
      :param is_default_enabled: Whether to train with the algorithm by default.
      :type is_default_enabled: bool
      :param project_id: The unique ID of the project.
      :type project_id: str
      :param use_gpu: Whether this algorithm needs to run on GPU.
      :type use_gpu: bool
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list

      :returns: The new custom algorithm that can be used for training.
      :rtype: Algorithm



   .. py:method:: delete_algorithm(algorithm)

      Deletes the specified custom algorithm.

      :param algorithm: The name of the algorithm to delete.
      :type algorithm: str



   .. py:method:: update_algorithm(algorithm, source_code = None, training_data_parameter_names_mapping = None, training_config_parameter_name = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, config_options = None, is_default_enabled = None, use_gpu = None, package_requirements = None)

      Update a custom algorithm for the given algorithm name. If source code is provided, all function names for the source code must also be provided.

      :param algorithm: The name to identify the algorithm. Only uppercase letters, numbers, and underscores are allowed.
      :type algorithm: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the train/predict/predict_many/initialize functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param training_data_parameter_names_mapping: The mapping from feature group types to training data parameter names in the train function.
      :type training_data_parameter_names_mapping: dict
      :param training_config_parameter_name: The train config parameter name in the train function.
      :type training_config_parameter_name: str
      :param train_function_name: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the source code that will be executed for batch prediction of the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param initialize_function_name: Name of the function found in the source code to initialize the trained model before using it to make predictions using the model.
      :type initialize_function_name: str
      :param config_options: Map dataset types and configs to train function parameter names.
      :type config_options: dict
      :param is_default_enabled: Whether to train with the algorithm by default.
      :type is_default_enabled: bool
      :param use_gpu: Whether this algorithm needs to run on GPU.
      :type use_gpu: bool
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list

      :returns: The updated custom algorithm that can be used for training.
      :rtype: Algorithm



   .. py:method:: list_builtin_algorithms(project_id, feature_group_ids, training_config = None)

      Return list of built-in algorithms based on given input data and training config.

      :param project_id: Unique string identifier associated with the project.
      :type project_id: str
      :param feature_group_ids: List of feature group IDs specifying input data.
      :type feature_group_ids: List
      :param training_config: The training config to be used for model training.
      :type training_config: TrainingConfig

      :returns: List of applicable builtin algorithms.
      :rtype: list[Algorithm]



   .. py:method:: create_custom_loss_function_with_source_code(name, loss_function_type, loss_function_name, loss_function_source_code)

      Registers a new custom loss function which can be used as an objective function during model training.

      :param name: A name for the loss, unique per organization. Must be 50 characters or fewer, and can contain only underscores, numbers, and uppercase letters.
      :type name: str
      :param loss_function_type: The category of problems that this loss would be applicable to, e.g. REGRESSION_DL_TF, CLASSIFICATION_DL_TF, etc.
      :type loss_function_type: str
      :param loss_function_name: The name of the function whose full source code is passed in loss_function_source_code.
      :type loss_function_name: str
      :param loss_function_source_code: Python source code string of the function.
      :type loss_function_source_code: str

      :returns: A description of the registered custom loss function.
      :rtype: CustomLossFunction
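
      A minimal sketch; the loss function body is illustrative only, and the exact signature expected by the platform may differ:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         loss_source = '''
         import tensorflow as tf

         def weighted_mse(y_true, y_pred):
             # hypothetical weighted mean squared error
             return tf.reduce_mean(2.0 * tf.square(y_true - y_pred))
         '''

         loss = client.create_custom_loss_function_with_source_code(
             name='WEIGHTED_MSE',                    # uppercase letters, numbers, underscores only
             loss_function_type='REGRESSION_DL_TF',
             loss_function_name='weighted_mse',
             loss_function_source_code=loss_source,
         )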



   .. py:method:: update_custom_loss_function_with_source_code(name, loss_function_name, loss_function_source_code)

      Updates a previously registered custom loss function with a new function implementation.

      :param name: Name of the registered custom loss.
      :type name: str
      :param loss_function_name: Name of the function whose full source code is passed in loss_function_source_code.
      :type loss_function_name: str
      :param loss_function_source_code: Python source code string of the function.
      :type loss_function_source_code: str

      :returns: A description of the updated custom loss function.
      :rtype: CustomLossFunction



   .. py:method:: delete_custom_loss_function(name)

      Deletes a previously registered custom loss function.

      :param name: The name of the custom loss function to be deleted.
      :type name: str



   .. py:method:: create_custom_metric(name, problem_type, custom_metric_function_name = None, source_code = None)

      Registers a new custom metric which can be used as an evaluation metric for the trained model.

      :param name: A unique name for the metric, with a limit of 50 characters. Only underscores, numbers, and uppercase letters are allowed.
      :type name: str
      :param problem_type: The problem type that this metric would be applicable to, e.g. REGRESSION, FORECASTING, etc.
      :type problem_type: str
      :param custom_metric_function_name: The name of the function whose full source code is passed in source_code.
      :type custom_metric_function_name: str
      :param source_code: The full source code of the custom metric function. This is required if custom_metric_function_name is passed.
      :type source_code: str

      :returns: The newly created custom metric.
      :rtype: CustomMetric
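
      A minimal sketch; the metric body is illustrative only, and the exact inputs the platform passes to the function may differ:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         metric_source = '''
         def mean_abs_error(y_true, y_pred):
             # hypothetical metric implementation
             return float(sum(abs(t - p) for t, p in zip(y_true, y_pred)) / len(y_true))
         '''

         metric = client.create_custom_metric(
             name='MEAN_ABS_ERROR',                # uppercase letters, numbers, underscores only
             problem_type='REGRESSION',
             custom_metric_function_name='mean_abs_error',
             source_code=metric_source,
         )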



   .. py:method:: update_custom_metric(name, custom_metric_function_name, source_code)

      Updates a previously registered custom metric with a new function implementation.

      :param name: Name of the registered custom metric.
      :type name: str
      :param custom_metric_function_name: Name of the function whose full source code is passed in `source_code`.
      :type custom_metric_function_name: str
      :param source_code: Python source code string of the function.
      :type source_code: str

      :returns: A description of the updated custom metric.
      :rtype: CustomMetric



   .. py:method:: delete_custom_metric(name)

      Deletes a previously registered custom metric.

      :param name: The name of the custom metric to be deleted.
      :type name: str



   .. py:method:: create_module(name, source_code = None)

      Creates a module that is reusable in customer code, e.g. Python functions, bring-your-own algorithms, etc.

      :param name: The name to identify the module; only lowercase letters and underscores are allowed.
      :type name: str
      :param source_code: Contents of a valid python source code file.
      :type source_code: str

      :returns: The new module
      :rtype: Module



   .. py:method:: delete_module(name)

      Deletes the specified custom module.

      :param name: The name of the custom module to delete.
      :type name: str



   .. py:method:: update_module(name, source_code = None)

      Update the module.

      :param name: The name to identify the module.
      :type name: str
      :param source_code: Contents of a valid python source code file.
      :type source_code: str

      :returns: The updated module.
      :rtype: Module



   .. py:method:: create_organization_secret(secret_key, value)

      Creates a secret which can be accessed in functions and notebooks.

      :param secret_key: The secret key.
      :type secret_key: str
      :param value: The secret value.
      :type value: str

      :returns: The created secret.
      :rtype: OrganizationSecret
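
      A minimal sketch; the secret key and value are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         secret = client.create_organization_secret(
             secret_key='SLACK_WEBHOOK_URL',
             value='https://hooks.slack.com/services/XXXX/YYYY',
         )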



   .. py:method:: delete_organization_secret(secret_key)

      Deletes a secret.

      :param secret_key: The secret key.
      :type secret_key: str



   .. py:method:: update_organization_secret(secret_key, value)

      Updates a secret.

      :param secret_key: The secret key.
      :type secret_key: str
      :param value: The secret value.
      :type value: str

      :returns: The updated secret.
      :rtype: OrganizationSecret



   .. py:method:: set_natural_language_explanation(short_explanation, long_explanation, feature_group_id = None, feature_group_version = None, model_id = None)

      Saves the natural language explanation of an artifact with the given ID. The artifact can be a Feature Group or a Feature Group Version.

      :param short_explanation: succinct explanation of the artifact with given ID
      :type short_explanation: str
      :param long_explanation: verbose explanation of the artifact with given ID
      :type long_explanation: str
      :param feature_group_id: A unique string identifier associated with the Feature Group.
      :type feature_group_id: str
      :param feature_group_version: A unique string identifier associated with the Feature Group Version.
      :type feature_group_version: str
      :param model_id: A unique string identifier associated with the Model.
      :type model_id: str



   .. py:method:: create_chat_session(project_id = None, name = None)

      Creates a chat session with Data Science Co-pilot.

      :param project_id: The unique project identifier this chat session belongs to
      :type project_id: str
      :param name: The name of the chat session. Defaults to the project name.
      :type name: str

      :returns: The chat session with Data Science Co-pilot
      :rtype: ChatSession



   .. py:method:: delete_chat_message(chat_session_id, message_index)

      Deletes a message in a chat session and its associated response.

      :param chat_session_id: Unique ID of the chat session.
      :type chat_session_id: str
      :param message_index: The index of the chat message within the UI.
      :type message_index: int



   .. py:method:: export_chat_session(chat_session_id)

      Exports a chat session to an HTML file

      :param chat_session_id: Unique ID of the chat session.
      :type chat_session_id: str



   .. py:method:: rename_chat_session(chat_session_id, name)

      Renames a chat session with Data Science Co-pilot.

      :param chat_session_id: Unique ID of the chat session.
      :type chat_session_id: str
      :param name: The new name of the chat session.
      :type name: str



   .. py:method:: suggest_abacus_apis(query, verbosity = 1, limit = 5, include_scores = False)

      Suggests several Abacus APIs that are most relevant to the supplied natural language query.

      :param query: The natural language query to find Abacus APIs for
      :type query: str
      :param verbosity: The verbosity level of the suggested Abacus APIs. Ranges from 0 to 2, with 0 being the least verbose and 2 being the most verbose.
      :type verbosity: int
      :param limit: The maximum number of APIs to return
      :type limit: int
      :param include_scores: Whether to include the relevance scores of the suggested APIs
      :type include_scores: bool

      :returns: A list of suggested Abacus APIs
      :rtype: list[AbacusApi]
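
      A minimal sketch; the query text is illustrative:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         suggestions = client.suggest_abacus_apis(
             query='how do I start a batch prediction run?',
             verbosity=1,
             limit=3,
             include_scores=True,
         )
         for api in suggestions:
             print(api)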



   .. py:method:: create_deployment_conversation(deployment_id = None, name = None, external_application_id = None)

      Creates a deployment conversation.

      :param deployment_id: The deployment this conversation belongs to.
      :type deployment_id: str
      :param name: The name of the conversation.
      :type name: str
      :param external_application_id: The external application id associated with the deployment conversation.
      :type external_application_id: str

      :returns: The deployment conversation.
      :rtype: DeploymentConversation



   .. py:method:: delete_deployment_conversation(deployment_conversation_id, deployment_id = None)

      Delete a Deployment Conversation.

      :param deployment_conversation_id: A unique string identifier associated with the deployment conversation.
      :type deployment_conversation_id: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str



   .. py:method:: clear_deployment_conversation(deployment_conversation_id = None, external_session_id = None, deployment_id = None, user_message_indices = None)

      Clear the message history of a Deployment Conversation.

      :param deployment_conversation_id: A unique string identifier associated with the deployment conversation.
      :type deployment_conversation_id: str
      :param external_session_id: The external session id associated with the deployment conversation.
      :type external_session_id: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str
      :param user_message_indices: Optional list of user message indices to clear. The associated bot response will also be cleared. If not provided, all messages will be cleared.
      :type user_message_indices: list



   .. py:method:: set_deployment_conversation_feedback(deployment_conversation_id, message_index, is_useful = None, is_not_useful = None, feedback = None, feedback_type = None, deployment_id = None)

      Sets a deployment conversation message as useful or not useful

      :param deployment_conversation_id: A unique string identifier associated with the deployment conversation.
      :type deployment_conversation_id: str
      :param message_index: The index of the deployment conversation message
      :type message_index: int
      :param is_useful: Whether the message is useful. If true, the message is marked as useful; if false, the useful flag is cleared.
      :type is_useful: bool
      :param is_not_useful: Whether the message is not useful. If true, the message is marked as not useful; if false, the flag is cleared.
      :type is_not_useful: bool
      :param feedback: Optional feedback on why the message is useful or not useful
      :type feedback: str
      :param feedback_type: Optional feedback type
      :type feedback_type: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str



   .. py:method:: rename_deployment_conversation(deployment_conversation_id, name, deployment_id = None)

      Rename a Deployment Conversation.

      :param deployment_conversation_id: A unique string identifier associated with the deployment conversation.
      :type deployment_conversation_id: str
      :param name: The new name of the conversation.
      :type name: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str



   .. py:method:: create_app_user_group(name)

      Creates a new App User Group. This User Group is used to grant permissions to access external chatbots.

      :param name: The name of the App User Group.
      :type name: str

      :returns: The App User Group.
      :rtype: AppUserGroup



   .. py:method:: delete_app_user_group(user_group_id)

      Deletes an App User Group.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str



   .. py:method:: invite_users_to_app_user_group(user_group_id, emails)

      Invite users to an App User Group. This method will send the specified email addresses an invitation link to join a specific user group.

      This will allow them to use any chatbots that this user group has access to.


      :param user_group_id: The ID of the App User Group to invite the user to.
      :type user_group_id: str
      :param emails: The email addresses to invite to your user group.
      :type emails: List

      :returns: The response of the invitation. This will contain the emails that were successfully invited and the emails that were not.
      :rtype: ExternalInvite



   .. py:method:: add_users_to_app_user_group(user_group_id, user_emails)

      Adds users to an App User Group.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str
      :param user_emails: The emails of the users to add to the App User Group.
      :type user_emails: list



   .. py:method:: remove_users_from_app_user_group(user_group_id, user_emails)

      Removes users from an App User Group.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str
      :param user_emails: The emails of the users to remove from the App User Group.
      :type user_emails: list



   .. py:method:: add_app_user_group_report_permission(user_group_id)

      Gives the App User Group permission to view all reports in the corresponding organization.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str



   .. py:method:: remove_app_user_group_report_permission(user_group_id)

      Removes the App User Group's permission to view all reports in the corresponding organization.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str



   .. py:method:: add_app_user_group_to_external_application(user_group_id, external_application_id)

      Adds a permission for an App User Group to access an External Application.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str
      :param external_application_id: The ID of the External Application.
      :type external_application_id: str



   .. py:method:: remove_app_user_group_from_external_application(user_group_id, external_application_id)

      Removes a permission for an App User Group to access an External Application.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str
      :param external_application_id: The ID of the External Application.
      :type external_application_id: str



   .. py:method:: create_external_application(deployment_id, name = None, description = None, logo = None, theme = None)

      Creates a new External Application from an existing ChatLLM Deployment.

      :param deployment_id: The ID of the deployment to use.
      :type deployment_id: str
      :param name: The name of the External Application. If not provided, the name of the deployment will be used.
      :type name: str
      :param description: The description of the External Application. This will be shown to users when they access the External Application. If not provided, the description of the deployment will be used.
      :type description: str
      :param logo: The logo to be displayed.
      :type logo: str
      :param theme: The visual theme of the External Application.
      :type theme: dict

      :returns: The newly created External Application.
      :rtype: ExternalApplication
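
      A minimal sketch; the deployment ID, name, and description are hypothetical:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         external_app = client.create_external_application(
             deployment_id='YOUR_CHATLLM_DEPLOYMENT_ID',
             name='Support Assistant',
             description='Answers questions about our internal documentation.',
         )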



   .. py:method:: update_external_application(external_application_id, name = None, description = None, theme = None, deployment_id = None, deployment_conversation_retention_hours = None, reset_retention_policy = False)

      Updates an External Application.

      :param external_application_id: The ID of the External Application.
      :type external_application_id: str
      :param name: The name of the External Application.
      :type name: str
      :param description: The description of the External Application. This will be shown to users when they access the External Application.
      :type description: str
      :param theme: The visual theme of the External Application.
      :type theme: dict
      :param deployment_id: The ID of the deployment to use.
      :type deployment_id: str
      :param deployment_conversation_retention_hours: The number of hours to retain the conversations for.
      :type deployment_conversation_retention_hours: int
      :param reset_retention_policy: If true, the retention policy will be removed.
      :type reset_retention_policy: bool

      :returns: The updated External Application.
      :rtype: ExternalApplication



   .. py:method:: delete_external_application(external_application_id)

      Deletes an External Application.

      :param external_application_id: The ID of the External Application.
      :type external_application_id: str



   .. py:method:: create_agent(project_id, function_source_code = None, agent_function_name = None, name = None, memory = None, package_requirements = [], description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None, workflow_graph = None, agent_interface = AgentInterface.DEFAULT, included_modules = None, org_level_connectors = None, user_level_connectors = None, initialize_function_name = None, initialize_function_code = None)

      Creates a new AI agent using the given agent workflow graph definition.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param name: The name you want your agent to have, defaults to "<Project Name> Agent".
      :type name: str
      :param memory: Overrides the default memory allocation (in GB) for the agent.
      :type memory: int
      :param package_requirements: A list of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param description: A description of the agent, including its purpose and instructions.
      :type description: str
      :param evaluation_feature_group_id: The ID of the feature group to use for evaluation.
      :type evaluation_feature_group_id: str
      :param workflow_graph: The workflow graph for the agent.
      :type workflow_graph: WorkflowGraph
      :param agent_interface: The interface that the agent will be deployed with.
      :type agent_interface: AgentInterface
      :param included_modules: A list of user created custom modules to include in the agent's environment.
      :type included_modules: List
      :param org_level_connectors: A list of org level connector ids to be used by the agent.
      :type org_level_connectors: List
      :param user_level_connectors: A dictionary mapping ApplicationConnectorType keys to lists of OAuth scopes. Each key represents a specific user level application connector, while the value is a list of scopes that define the permissions granted to the application.
      :type user_level_connectors: Dict
      :param initialize_function_name: The name of the function to be used for initialization.
      :type initialize_function_name: str
      :param initialize_function_code: The function code to be used for initialization.
      :type initialize_function_code: str

      :returns: The new agent.
      :rtype: Agent



   .. py:method:: update_agent(model_id, function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = None, agent_input_schema = None, agent_output_schema = None, workflow_graph = None, agent_interface = None, included_modules = None, org_level_connectors = None, user_level_connectors = None, initialize_function_name = None, initialize_function_code = None)

      Updates an existing AI Agent. A new version of the agent will be created and published.

      :param model_id: The unique ID associated with the AI Agent to be changed.
      :type model_id: str
      :param memory: Memory (in GB) for the agent.
      :type memory: int
      :param package_requirements: A list of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param description: A description of the agent, including its purpose and instructions.
      :type description: str
      :param workflow_graph: The workflow graph for the agent.
      :type workflow_graph: WorkflowGraph
      :param agent_interface: The interface that the agent will be deployed with.
      :type agent_interface: AgentInterface
      :param included_modules: A list of user created custom modules to include in the agent's environment.
      :type included_modules: List
      :param org_level_connectors: A list of org level connector ids to be used by the agent.
      :type org_level_connectors: List
      :param user_level_connectors: A dictionary mapping ApplicationConnectorType keys to lists of OAuth scopes. Each key represents a specific user level application connector, while the value is a list of scopes that define the permissions granted to the application.
      :type user_level_connectors: Dict
      :param initialize_function_name: The name of the function to be used for initialization.
      :type initialize_function_name: str
      :param initialize_function_code: The function code to be used for initialization.
      :type initialize_function_code: str

      :returns: The updated agent.
      :rtype: Agent



   .. py:method:: generate_agent_code(project_id, prompt, fast_mode = None)

      Generates the code for defining an AI Agent

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param prompt: A natural language prompt which describes agent specification. Describe what the agent will do, what inputs it will expect, and what outputs it will give out
      :type prompt: str
      :param fast_mode: If True, runs a faster but slightly less accurate code generation pipeline
      :type fast_mode: bool



   .. py:method:: evaluate_prompt(prompt = None, system_message = None, llm_name = None, max_tokens = None, temperature = 0.0, messages = None, response_type = None, json_response_schema = None, stop_sequences = None, top_p = None)

      Generate response to the prompt using the specified model.

      :param prompt: Prompt to use for generation.
      :type prompt: str
      :param system_message: System prompt for models that support it.
      :type system_message: str
      :param llm_name: Name of the underlying LLM to be used for generation. Default is auto selection.
      :type llm_name: LLMName
      :param max_tokens: Maximum number of tokens to generate. If set, the model will just stop generating after this token limit is reached.
      :type max_tokens: int
      :param temperature: Temperature to use for generation. Higher temperatures produce more non-deterministic responses; a value of zero produces mostly deterministic responses. Default is 0.0. A range of 0.0 - 2.0 is allowed.
      :type temperature: float
      :param messages: A list of messages to use as conversation history. A message is a dict with attributes: is_user (bool): Whether the message is from the user. text (str): The message's text. attachments (list): The files attached to the message represented as a list of dictionaries [{"doc_id": <doc_id1>}, {"doc_id": <doc_id2>}]
      :type messages: list
      :param response_type: Specifies the type of response to request from the LLM. One of 'text' or 'json'. If set to 'json', the LLM will respond with a JSON-formatted string whose schema can be specified via `json_response_schema`. Defaults to 'text'.
      :type response_type: str
      :param json_response_schema: A dictionary specifying the keys/schema/parameters which the LLM should adhere to in its response when `response_type` is 'json'. Each parameter is mapped to a dict with the following info - type (str) (required): Data type of the parameter. description (str) (required): Description of the parameter. is_required (bool) (optional): Whether the parameter is required or not. Example: json_response_schema = {'title': {'type': 'string', 'description': 'Article title', 'is_required': True}, 'body': {'type': 'string', 'description': 'Article body'}}
      :type json_response_schema: dict
      :param stop_sequences: Specifies the strings on which the LLM will stop generation.
      :type stop_sequences: List
      :param top_p: The nucleus sampling value used for this run. If set, the model will sample from the smallest set of tokens whose cumulative probability exceeds the probability `top_p`. Default is 1.0. A range of 0.0 - 1.0 is allowed. It is generally recommended to use either temperature sampling or nucleus sampling, but not both.
      :type top_p: float

      :returns: The response from the model, raw text and parsed components.
      :rtype: LlmResponse
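
      A minimal usage sketch, assuming an authenticated ``ApiClient`` instance named ``client`` (the prompt text and schema are illustrative):

      .. code-block:: python

         schema = {
             'title': {'type': 'string', 'description': 'Article title', 'is_required': True},
             'body': {'type': 'string', 'description': 'Article body'},
         }
         response = client.evaluate_prompt(
             prompt='Write a short article about data drift.',
             system_message='You are a concise technical writer.',
             temperature=0.0,
             response_type='json',
             json_response_schema=schema,
         )
         # `response` is an LlmResponse holding the raw text and parsed components;
         # check the LlmResponse class for the exact attribute names.
         print(response)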



   .. py:method:: render_feature_groups_for_llm(feature_group_ids, token_budget = None, include_definition = True)

      Encode feature groups as language model inputs.

      :param feature_group_ids: List of feature groups to be encoded.
      :type feature_group_ids: List
      :param token_budget: Enforce a given budget for each encoded feature group.
      :type token_budget: int
      :param include_definition: Include the definition of the feature group in the encoding.
      :type include_definition: bool

      :returns: LLM input object comprising information about the feature groups with the given IDs.
      :rtype: list[LlmInput]



   .. py:method:: generate_code_for_data_query_using_llm(query, feature_group_ids = None, external_database_schemas = None, prompt_context = None, llm_name = None, temperature = None, sql_dialect = 'Spark')

      Execute a data query using a large language model in an async fashion.

      :param query: The natural language query to execute. The query is converted to a SQL query using the language model.
      :type query: str
      :param feature_group_ids: A list of feature group IDs that the query should be executed against.
      :type feature_group_ids: List
      :param external_database_schemas: A list of schemas from external databases that the query should be executed against.
      :type external_database_schemas: List
      :param prompt_context: The context message used to construct the prompt for the language model. If not provided, a default context message is used.
      :type prompt_context: str
      :param llm_name: The name of the language model to use. If not provided, the default language model is used.
      :type llm_name: LLMName
      :param temperature: The temperature to use for the language model if supported. If not provided, the default temperature is used.
      :type temperature: float
      :param sql_dialect: The SQL dialect to generate the query in. The default is Spark.
      :type sql_dialect: str

      :returns: The generated SQL code.
      :rtype: LlmGeneratedCode
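
      A minimal usage sketch, assuming an authenticated ``ApiClient`` named ``client``; the feature group ID is illustrative:

      .. code-block:: python

         generated = client.generate_code_for_data_query_using_llm(
             query='Total sales per region over the last 30 days',
             feature_group_ids=['fg_sales_123'],  # illustrative feature group ID
             sql_dialect='Spark',
         )
         # `generated` is an LlmGeneratedCode object containing the SQL produced by the model.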



   .. py:method:: extract_data_using_llm(field_descriptors, document_id = None, document_text = None, llm_name = None)

      Extract fields from a document using a large language model.

      :param field_descriptors: A list of fields to extract from the document.
      :type field_descriptors: List
      :param document_id: The ID of the document to query.
      :type document_id: str
      :param document_text: The text of the document to query. Only used if document_id is not provided.
      :type document_text: str
      :param llm_name: The name of the language model to use. If not provided, the default language model is used.
      :type llm_name: LLMName

      :returns: The response from the document query.
      :rtype: ExtractedFields
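
      A minimal usage sketch, assuming an authenticated ``ApiClient`` named ``client``. The plain-dict shape of each field descriptor shown here is an assumption; consult the FieldDescriptor API class for the exact structure:

      .. code-block:: python

         fields = client.extract_data_using_llm(
             field_descriptors=[
                 {'field': 'invoice_number', 'description': 'The invoice number'},  # assumed dict shape
                 {'field': 'total_amount', 'description': 'The total amount due'},
             ],
             document_text='Invoice #1042 ... Total due: $450.00',
         )
         # `fields` is an ExtractedFields object with the values extracted by the model.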



   .. py:method:: search_web_for_llm(queries, search_providers = None, max_results = 1, safe = True, fetch_content = False, max_page_tokens = 8192, convert_to_markdown = True)

      Access web search providers to fetch content related to the queries for use in large language model inputs.

      This method can access multiple search providers and return information from them. If the provider supplies
      URLs for the results then this method also supports fetching the contents of those URLs, optionally converting
      them to markdown format, and returning them as part of the response. Set a token budget to limit the amount of
      content returned in the response.


      :param queries: List of queries to send to the search providers. At most 10 queries each less than 512 characters.
      :type queries: List
      :param search_providers: Search providers to use for the search. If not provided, a default provider is used. Supported providers: BING, GOOGLE.
      :type search_providers: List
      :param max_results: Maximum number of results to fetch per provider. Must be in [1, 100]. Defaults to 1 (I'm feeling lucky).
      :type max_results: int
      :param safe: Whether content safety is enabled for these search requests. Defaults to True.
      :type safe: bool
      :param fetch_content: If true, fetches the content from the URLs in the search results. Defaults to False.
      :type fetch_content: bool
      :param max_page_tokens: Maximum number of tokens to accumulate if fetching search result contents.
      :type max_page_tokens: int
      :param convert_to_markdown: Whether content should be converted to markdown. Defaults to True.
      :type convert_to_markdown: bool

      :returns: Results of running the search queries.
      :rtype: WebSearchResponse
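
      A minimal usage sketch, assuming an authenticated ``ApiClient`` named ``client``:

      .. code-block:: python

         results = client.search_web_for_llm(
             queries=['retrieval augmented generation best practices'],
             max_results=3,
             fetch_content=True,
             max_page_tokens=4096,
             convert_to_markdown=True,
         )
         # `results` is a WebSearchResponse; its search results (and fetched page
         # content, when requested) can be folded into downstream LLM prompts.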



   .. py:method:: fetch_web_page(url, convert_to_markdown = True)

      Scrapes the content of a web page and returns it as a string.

      :param url: The url of the web page to scrape.
      :type url: str
      :param convert_to_markdown: Whether content should be converted to markdown.
      :type convert_to_markdown: bool

      :returns: The content of the web page.
      :rtype: WebPageResponse



   .. py:method:: construct_agent_conversation_messages_for_llm(deployment_conversation_id = None, external_session_id = None, include_document_contents = True)

      Returns conversation history in a format for LLM calls.

      :param deployment_conversation_id: Unique ID of the conversation. One of deployment_conversation_id or external_session_id must be provided.
      :type deployment_conversation_id: str
      :param external_session_id: External session ID of the conversation.
      :type external_session_id: str
      :param include_document_contents: If true, include contents from uploaded documents in the generated messages.
      :type include_document_contents: bool

      :returns: Contains a list of AgentConversationMessage that represents the conversation.
      :rtype: AgentConversation



   .. py:method:: validate_workflow_graph(workflow_graph, agent_interface = AgentInterface.DEFAULT, package_requirements = [])

      Validates the workflow graph for an AI Agent.

      :param workflow_graph: The workflow graph to validate.
      :type workflow_graph: WorkflowGraph
      :param agent_interface: The interface that the agent will be deployed with.
      :type agent_interface: AgentInterface
      :param package_requirements: A list of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list



   .. py:method:: extract_agent_workflow_information(workflow_graph, agent_interface = AgentInterface.DEFAULT, package_requirements = [])

      Extracts the source code of the workflow graph, ancestors, in_edges, and traversal orders from the agent workflow.

      :param workflow_graph: The workflow graph to validate.
      :type workflow_graph: WorkflowGraph
      :param agent_interface: The interface that the agent will be deployed with.
      :type agent_interface: AgentInterface
      :param package_requirements: A list of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list



   .. py:method:: get_llm_app_response(llm_app_name, prompt)

      Queries the specified LLM App to generate a response to the prompt. LLM Apps are LLMs tailored to achieve a specific task like code generation for a specific service's API.

      :param llm_app_name: The name of the LLM App to use for generation.
      :type llm_app_name: str
      :param prompt: The prompt to use for generation.
      :type prompt: str

      :returns: The response from the LLM App.
      :rtype: LlmResponse



   .. py:method:: create_document_retriever(project_id, name, feature_group_id, document_retriever_config = None)

      Returns a document retriever that stores embeddings for document chunks in a feature group.

      Document columns in the feature group are broken into chunks. For cases with multiple document columns, chunks from all columns are combined together to form a single chunk.


      :param project_id: The ID of project that the Document Retriever is created in.
      :type project_id: str
      :param name: The name of the Document Retriever. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type name: str
      :param feature_group_id: The ID of the feature group that the Document Retriever is associated with.
      :type feature_group_id: str
      :param document_retriever_config: The configuration, including chunk_size and chunk_overlap_fraction, for document retrieval.
      :type document_retriever_config: VectorStoreConfig

      :returns: The newly created document retriever.
      :rtype: DocumentRetriever
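
      A minimal usage sketch, assuming an authenticated ``ApiClient`` named ``client``; the project and feature group IDs are illustrative:

      .. code-block:: python

         retriever = client.create_document_retriever(
             project_id='proj_abc123',
             name='support_docs_retriever',
             feature_group_id='fg_docs_456',  # feature group holding the document column(s)
         )
         # Pass a VectorStoreConfig as document_retriever_config to control
         # chunk_size and chunk_overlap_fraction.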



   .. py:method:: rename_document_retriever(document_retriever_id, name)

      Updates an existing document retriever.

      :param document_retriever_id: The unique ID associated with the document retriever.
      :type document_retriever_id: str
      :param name: The name to update the document retriever with.
      :type name: str

      :returns: The updated document retriever.
      :rtype: DocumentRetriever



   .. py:method:: create_document_retriever_version(document_retriever_id, feature_group_id = None, document_retriever_config = None)

      Creates a document retriever version from the latest version of the feature group that the document retriever is associated with.

      :param document_retriever_id: The unique ID associated with the document retriever to create version with.
      :type document_retriever_id: str
      :param feature_group_id: The ID of the feature group to update the document retriever with.
      :type feature_group_id: str
      :param document_retriever_config: The configuration, including chunk_size and chunk_overlap_fraction, for document retrieval.
      :type document_retriever_config: VectorStoreConfig

      :returns: The newly created document retriever version.
      :rtype: DocumentRetrieverVersion



   .. py:method:: delete_document_retriever(vector_store_id)

      Delete a Document Retriever.

      :param vector_store_id: A unique string identifier associated with the document retriever.
      :type vector_store_id: str



   .. py:method:: delete_document_retriever_version(document_retriever_version)

      Delete a document retriever version.

      :param document_retriever_version: A unique string identifier associated with the document retriever version.
      :type document_retriever_version: str



   .. py:method:: get_document_snippet(document_retriever_id, document_id, start_word_index = None, end_word_index = None)

      Get a snippet from documents in the document retriever.

      :param document_retriever_id: A unique string identifier associated with the document retriever.
      :type document_retriever_id: str
      :param document_id: The ID of the document to retrieve the snippet from.
      :type document_id: str
      :param start_word_index: If provided, will start the snippet at the index (of words in the document) specified.
      :type start_word_index: int
      :param end_word_index: If provided, will end the snippet at the index (of words in the document) specified.
      :type end_word_index: int

      :returns: The documentation snippet found from the document retriever.
      :rtype: DocumentRetrieverLookupResult



   .. py:method:: restart_document_retriever(document_retriever_id)

      Restart the document retriever if it is stopped or has failed.

      This will start the deployment of the document retriever, but will not wait for it to be ready. You need to call wait_until_ready to wait until the deployment is ready.


      :param document_retriever_id: A unique string identifier associated with the document retriever.
      :type document_retriever_id: str



   .. py:method:: get_relevant_snippets(doc_ids = None, blobs = None, query = None, document_retriever_config = None, honor_sentence_boundary = True, num_retrieval_margin_words = None, max_words_per_snippet = None, max_snippets_per_document = None, start_word_index = None, end_word_index = None, including_bounding_boxes = False, text = None, document_processing_config = None)

      Retrieves snippets relevant to a given query from specified documents.

      This function supports flexible input options, allowing for retrieval from a variety of data sources including document IDs, blob data, and plain text. When multiple data sources are provided, all are considered in the retrieval process. Document retrievers may be created on-the-fly to perform the lookup.


      :param doc_ids: A list of document store IDs to retrieve the snippets from.
      :type doc_ids: List
      :param blobs: A dictionary mapping document names to the blob data.
      :type blobs: io.TextIOBase
      :param query: Query string to find relevant snippets in the documents.
      :type query: str
      :param document_retriever_config: If provided, used to configure the retrieval steps like chunking for embeddings.
      :type document_retriever_config: VectorStoreConfig
      :param num_retrieval_margin_words: If provided, will add this number of words from left and right of the returned snippets.
      :type num_retrieval_margin_words: int
      :param max_words_per_snippet: If provided, will limit the number of words in each snippet to the value specified.
      :type max_words_per_snippet: int
      :param max_snippets_per_document: If provided, will limit the number of snippets retrieved from each document to the value specified.
      :type max_snippets_per_document: int
      :param start_word_index: If provided, will start the snippet at the index (of words in the document) specified.
      :type start_word_index: int
      :param end_word_index: If provided, will end the snippet at the index (of words in the document) specified.
      :type end_word_index: int
      :param including_bounding_boxes: If true, will include the bounding boxes of the snippets if they are available.
      :type including_bounding_boxes: bool
      :param text: Plain text from which to retrieve snippets.
      :type text: str
      :param document_processing_config: The document processing configuration used to extract text when doc_ids or blobs are provided. If provided, this will override the `including_bounding_boxes` parameter.
      :type document_processing_config: DocumentProcessingConfig

      :returns: The snippets found from the documents.
      :rtype: list[DocumentRetrieverLookupResult]
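
      A minimal usage sketch, assuming an authenticated ``ApiClient`` named ``client``; the document IDs are illustrative:

      .. code-block:: python

         snippets = client.get_relevant_snippets(
             doc_ids=['doc_123', 'doc_456'],
             query='What is the refund policy?',
             max_words_per_snippet=200,
             max_snippets_per_document=2,
         )
         for snippet in snippets:
             print(snippet)  # each item is a DocumentRetrieverLookupResult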



.. py:exception:: ApiException(message, http_status, exception = None, request_id = None)

   Bases: :py:obj:`Exception`


   Default ApiException raised by APIs

   :param message: The error message
   :type message: str
   :param http_status: The HTTP status code returned by the server
   :type http_status: int
   :param exception: The exception class raised by the server
   :type exception: str
   :param request_id: The request id
   :type request_id: str


   .. py:attribute:: message


   .. py:attribute:: http_status


   .. py:attribute:: exception
      :value: 'ApiException'



   .. py:attribute:: request_id
      :value: None



   .. py:method:: __str__()

      Return str(self).



.. py:class:: ClientOptions(exception_on_404 = True, server = DEFAULT_SERVER)

   Options for configuring the ApiClient

   :param exception_on_404: If true, will raise an exception on a 404 from the server, else will return None.
   :type exception_on_404: bool
   :param server: The default server endpoint to use for API requests
   :type server: str


   .. py:attribute:: exception_on_404
      :value: True



   .. py:attribute:: server
      :value: 'https://api.abacus.ai'
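
   A minimal configuration sketch (the API key value is a placeholder):

   .. code-block:: python

      from abacusai import ApiClient, ClientOptions

      options = ClientOptions(exception_on_404=False, server='https://api.abacus.ai')
      client = ApiClient(api_key='YOUR_API_KEY', client_options=options)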



.. py:class:: ReadOnlyClient(api_key = None, server = None, client_options = None, skip_version_check = False, include_tb = False)

   Bases: :py:obj:`BaseApiClient`


   Abacus.AI Read Only API Client. Only contains GET methods

   :param api_key: The api key to use as authentication to the server
   :type api_key: str
   :param server: The base server URL to use to send API requests to
   :type server: str
   :param client_options: Optional API client configurations
   :type client_options: ClientOptions
   :param skip_version_check: If true, will skip checking the server's current API version on initializing the client
   :type skip_version_check: bool


   .. py:method:: list_api_keys()

      Lists all of the user's API keys

      :returns: List of API Keys for the current user's organization.
      :rtype: list[ApiKey]



   .. py:method:: list_organization_users()

      Retrieves a list of all platform users in the organization, including pending users who have been invited.

      :returns: An array of all the users in the organization.
      :rtype: list[User]



   .. py:method:: describe_user()

      Retrieve the current user's information, such as their name, email address, and admin status.

      :returns: An object containing information about the current user.
      :rtype: User



   .. py:method:: list_organization_groups()

      Lists all Organization Groups

      :returns: A list of all the organization groups within this organization.
      :rtype: list[OrganizationGroup]



   .. py:method:: describe_organization_group(organization_group_id)

      Returns the specific organization group passed in by the user.

      :param organization_group_id: The unique identifier of the organization group to be described.
      :type organization_group_id: str

      :returns: Information about a specific organization group.
      :rtype: OrganizationGroup



   .. py:method:: describe_webhook(webhook_id)

      Describe the webhook with a given ID.

      :param webhook_id: Unique string identifier of the target webhook.
      :type webhook_id: str

      :returns: The webhook with the given ID.
      :rtype: Webhook



   .. py:method:: list_deployment_webhooks(deployment_id)

      List all the webhooks attached to a given deployment.

      :param deployment_id: Unique identifier of the target deployment.
      :type deployment_id: str

      :returns: List of the webhooks attached to the given deployment ID.
      :rtype: list[Webhook]



   .. py:method:: list_use_cases()

      Retrieves a list of all use cases with descriptions. Use the given mappings to specify a use case when needed.

      :returns: A list of `UseCase` objects describing all the use cases addressed by the platform.
      :rtype: list[UseCase]



   .. py:method:: describe_problem_type(problem_type)

      Describes a problem type

      :param problem_type: The problem type to get details on
      :type problem_type: str

      :returns: The problem type requirements
      :rtype: ProblemType



   .. py:method:: describe_use_case_requirements(use_case)

      This API call returns the feature requirements for a specified use case.

      :param use_case: This contains the Enum String for the use case whose dataset requirements are needed.
      :type use_case: str

      :returns: The feature requirements of the use case are returned, including all the feature groups required for the use case along with their descriptions and feature mapping details.
      :rtype: list[UseCaseRequirements]



   .. py:method:: describe_project(project_id)

      Returns a description of a project.

      :param project_id: A unique string identifier for the project.
      :type project_id: str

      :returns: The description of the project.
      :rtype: Project



   .. py:method:: list_projects(limit = 100, start_after_id = None)

      Retrieves a list of all projects in the current organization.

      :param limit: The maximum length of the list of projects.
      :type limit: int
      :param start_after_id: The ID of the project after which the list starts.
      :type start_after_id: str

      :returns: A list of all projects in the Organization the user is currently logged in to.
      :rtype: list[Project]
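
      A minimal pagination sketch, assuming a client instance named ``client``; ``project_id`` is the assumed identifier attribute on the returned Project objects:

      .. code-block:: python

         projects, last_id = [], None
         while True:
             page = client.list_projects(limit=100, start_after_id=last_id)
             projects.extend(page)
             if len(page) < 100:
                 break
             last_id = page[-1].project_id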



   .. py:method:: get_project_feature_group_config(feature_group_id, project_id)

      Gets a feature group's project config

      :param feature_group_id: Unique string identifier for the feature group.
      :type feature_group_id: str
      :param project_id: Unique string identifier for the project.
      :type project_id: str

      :returns: The feature group's project configuration.
      :rtype: ProjectConfig



   .. py:method:: validate_project(project_id, feature_group_ids = None)

      Validates that the specified project has all required feature group types for its use case and that all required feature columns are set.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_ids: The list of feature group IDs to validate.
      :type feature_group_ids: List

      :returns: The project validation. If the specified project is missing required columns or feature groups, the response includes an array of objects for each missing required feature group and the missing required features in each feature group.
      :rtype: ProjectValidation



   .. py:method:: infer_feature_mappings(project_id, feature_group_id)

      Infer the feature mappings for the feature group in the project based on the problem type.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str

      :returns: A dict that contains the inferred feature mappings.
      :rtype: InferredFeatureMappings



   .. py:method:: verify_and_describe_annotation(feature_group_id, feature_name = None, doc_id = None, feature_group_row_identifier = None)

      Get the latest annotation entry for a given feature group, feature, and document along with verification information.

      :param feature_group_id: The ID of the feature group the annotation is on.
      :type feature_group_id: str
      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param doc_id: The ID of the primary document the annotation is on. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type doc_id: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the feature group's primary / identifier key value. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type feature_group_row_identifier: str

      :returns: The latest annotation entry for the given feature group, feature, document, and/or annotation key value. Includes the verification information.
      :rtype: AnnotationEntry



   .. py:method:: get_annotations_status(feature_group_id, feature_name = None, check_for_materialization = False)

      Get the status of the annotations for a given feature group and feature.

      :param feature_group_id: The ID of the feature group the annotation is on.
      :type feature_group_id: str
      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param check_for_materialization: If True, check if the feature group needs to be materialized before using for annotations.
      :type check_for_materialization: bool

      :returns: The status of the annotations for the given feature group and feature.
      :rtype: AnnotationsStatus



   .. py:method:: get_feature_group_schema(feature_group_id, project_id = None)

      Returns a schema for a given FeatureGroup in a project.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param project_id: The unique ID associated with the project.
      :type project_id: str

      :returns: A list of objects for each column in the specified feature group.
      :rtype: list[Feature]



   .. py:method:: get_point_in_time_feature_group_creation_options()

      Returns the options that can be used to generate PIT features.

      :returns: List of possible generated aggregation function options.
      :rtype: list[GeneratedPitFeatureConfigOption]



   .. py:method:: describe_feature_group(feature_group_id)

      Describe a Feature Group.

      :param feature_group_id: A unique string identifier associated with the feature group.
      :type feature_group_id: str

      :returns: The feature group object.
      :rtype: FeatureGroup



   .. py:method:: describe_feature_group_by_table_name(table_name)

      Describe a Feature Group by its table name.

      :param table_name: The unique table name of the Feature Group to look up.
      :type table_name: str

      :returns: The Feature Group.
      :rtype: FeatureGroup



   .. py:method:: list_feature_groups(limit = 100, start_after_id = None, feature_group_template_id = None, is_including_detached_from_template = False)

      List all the feature groups

      :param limit: The number of feature groups to retrieve.
      :type limit: int
      :param start_after_id: An offset parameter to exclude all feature groups up to a specified ID.
      :type start_after_id: str
      :param feature_group_template_id: If specified, limit the results to feature groups attached to this template ID.
      :type feature_group_template_id: str
      :param is_including_detached_from_template: When feature_group_template_id is specified, include feature groups that have been detached from that template ID.
      :type is_including_detached_from_template: bool

      :returns: All the feature groups in the organization, optionally filtered by the specified feature group template.
      :rtype: list[FeatureGroup]



   .. py:method:: describe_project_feature_group(project_id, feature_group_id)

      Describe a feature group associated with a project

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str

      :returns: The project feature group object.
      :rtype: ProjectFeatureGroup



   .. py:method:: list_project_feature_groups(project_id, filter_feature_group_use = None, limit = 100, start_after_id = None)

      List all the feature groups associated with a project

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param filter_feature_group_use: The feature group use filter; when given, only feature groups in this project with the given use are returned. Possible values are: 'USER_CREATED', 'BATCH_PREDICTION_OUTPUT'.
      :type filter_feature_group_use: str
      :param limit: The maximum number of feature groups to be retrieved.
      :type limit: int
      :param start_after_id: An offset parameter to exclude all feature groups up to a specified ID.
      :type start_after_id: str

      :returns: All the Feature Groups in a project.
      :rtype: list[ProjectFeatureGroup]



   .. py:method:: list_python_function_feature_groups(name, limit = 100)

      List all the feature groups associated with a python function.

      :param name: The name used to identify the Python function.
      :type name: str
      :param limit: The maximum number of feature groups to be retrieved.
      :type limit: int

      :returns: All the feature groups associated with the specified Python function ID.
      :rtype: list[FeatureGroup]



   .. py:method:: get_execute_feature_group_operation_result_part_count(feature_group_operation_run_id)

      Gets the number of parts in the result of the execution of the feature group operation

      :param feature_group_operation_run_id: The unique ID associated with the execution.
      :type feature_group_operation_run_id: str



   .. py:method:: download_execute_feature_group_operation_result_part_chunk(feature_group_operation_run_id, part, offset = 0, chunk_size = 10485760)

      Downloads a chunk of the result of the execution of the feature group operation

      :param feature_group_operation_run_id: The unique ID associated with the execution.
      :type feature_group_operation_run_id: str
      :param part: The part number of the result
      :type part: int
      :param offset: The offset in the part
      :type offset: int
      :param chunk_size: The size of the chunk
      :type chunk_size: int
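
      A minimal usage sketch, assuming a client instance named ``client``. The run ID is illustrative, and the part numbering (0- vs 1-based) and the chunk return type (assumed here to be bytes) are not specified in this reference:

      .. code-block:: python

         run_id = 'fg_op_run_123'
         part_count = client.get_execute_feature_group_operation_result_part_count(run_id)
         with open('result.out', 'wb') as out:
             for part in range(part_count):
                 data = client.download_execute_feature_group_operation_result_part_chunk(
                     run_id, part, offset=0, chunk_size=10485760)
                 out.write(data)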



   .. py:method:: update_feature_group_version_limit(feature_group_id, version_limit)

      Updates the version limit for the feature group.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param version_limit: The maximum number of versions permitted for the feature group. Once this limit is exceeded, the oldest versions will be purged in a First-In-First-Out (FIFO) order.
      :type version_limit: int

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: get_feature_group_version_export_download_url(feature_group_export_id)

      Get a link to download the feature group version.

      :param feature_group_export_id: Unique identifier of the Feature Group Export to get a signed URL for.
      :type feature_group_export_id: str

      :returns: Instance containing the download URL and expiration time for the Feature Group Export.
      :rtype: FeatureGroupExportDownloadUrl



   .. py:method:: describe_feature_group_export(feature_group_export_id)

      Describes a feature group export.

      :param feature_group_export_id: Unique identifier of the feature group export.
      :type feature_group_export_id: str

      :returns: The feature group export object.
      :rtype: FeatureGroupExport



   .. py:method:: list_feature_group_exports(feature_group_id)

      Lists all of the feature group exports for the feature group

      :param feature_group_id: Unique identifier of the feature group
      :type feature_group_id: str

      :returns: List of feature group exports
      :rtype: list[FeatureGroupExport]



   .. py:method:: get_feature_group_export_connector_errors(feature_group_export_id)

      Returns a stream containing the write errors of the feature group export database connection, if any writes failed to the database connector.

      :param feature_group_export_id: Unique identifier of the feature group export to get the errors for.
      :type feature_group_export_id: str



   .. py:method:: list_feature_group_modifiers(feature_group_id)

      List the users who can modify a given feature group.

      :param feature_group_id: Unique string identifier of the feature group.
      :type feature_group_id: str

      :returns: Information about the modification lock status and groups/organizations added to the feature group.
      :rtype: ModificationLockInfo



   .. py:method:: get_materialization_logs(feature_group_version, stdout = False, stderr = False)

      Returns logs for a materialized feature group version.

      :param feature_group_version: Unique string identifier for the feature group instance to export.
      :type feature_group_version: str
      :param stdout: Set to True to get info logs.
      :type stdout: bool
      :param stderr: Set to True to get error logs.
      :type stderr: bool

      :returns: A function logs object.
      :rtype: FunctionLogs



   .. py:method:: list_feature_group_versions(feature_group_id, limit = 100, start_after_version = None)

      Retrieves a list of all feature group versions for the specified feature group.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param limit: The maximum length of the returned versions.
      :type limit: int
      :param start_after_version: Results will start after this version.
      :type start_after_version: str

      :returns: A list of feature group versions.
      :rtype: list[FeatureGroupVersion]



   .. py:method:: describe_feature_group_version(feature_group_version)

      Describe a feature group version.

      :param feature_group_version: The unique identifier associated with the feature group version.
      :type feature_group_version: str

      :returns: The feature group version.
      :rtype: FeatureGroupVersion



   .. py:method:: get_feature_group_version_metrics(feature_group_version, selected_columns = None, include_charts = False, include_statistics = True)

      Get metrics for a specific feature group version.

      :param feature_group_version: A unique string identifier associated with the feature group version.
      :type feature_group_version: str
      :param selected_columns: A list of columns to order first.
      :type selected_columns: List
      :param include_charts: A flag indicating whether charts should be included in the response. Default is false.
      :type include_charts: bool
      :param include_statistics: A flag indicating whether statistics should be included in the response. Default is true.
      :type include_statistics: bool

      :returns: The metrics for the specified feature group version.
      :rtype: DataMetrics



   .. py:method:: get_feature_group_version_logs(feature_group_version)

      Retrieves the feature group materialization logs.

      :param feature_group_version: The unique version ID of the feature group version.
      :type feature_group_version: str

      :returns: The logs for the specified feature group version.
      :rtype: FeatureGroupVersionLogs



   .. py:method:: describe_feature_group_template(feature_group_template_id)

      Describe a Feature Group Template.

      :param feature_group_template_id: The unique identifier of a feature group template.
      :type feature_group_template_id: str

      :returns: The feature group template object.
      :rtype: FeatureGroupTemplate



   .. py:method:: list_feature_group_templates(limit = 100, start_after_id = None, feature_group_id = None, should_include_system_templates = False)

      List feature group templates, optionally scoped by the feature group that created the templates.

      :param limit: Maximum number of templates to be retrieved.
      :type limit: int
      :param start_after_id: Offset parameter to exclude all templates up to the specified feature group template ID.
      :type start_after_id: str
      :param feature_group_id: If specified, limit to templates created from this feature group.
      :type feature_group_id: str
      :param should_include_system_templates: If True, will include built-in templates.
      :type should_include_system_templates: bool

      :returns: All the feature group templates in the organization, optionally limited by the feature group that created the template(s).
      :rtype: list[FeatureGroupTemplate]



   .. py:method:: list_project_feature_group_templates(project_id, limit = 100, start_after_id = None, should_include_all_system_templates = False)

      List feature group templates for feature groups associated with the project.

      :param project_id: Unique string identifier to limit to templates associated with this project, e.g. templates associated with feature groups in this project.
      :type project_id: str
      :param limit: Maximum number of templates to be retrieved.
      :type limit: int
      :param start_after_id: Offset parameter to exclude all templates up to the specified feature group template ID.
      :type start_after_id: str
      :param should_include_all_system_templates: If True, will include built-in templates.
      :type should_include_all_system_templates: bool

      :returns: All the feature group templates in the organization, optionally limited by the feature group that created the template(s).
      :rtype: list[FeatureGroupTemplate]



   .. py:method:: suggest_feature_group_template_for_feature_group(feature_group_id)

      Suggest values for a feature group template, based on a feature group.

      :param feature_group_id: Unique identifier associated with the feature group to use for suggesting values to use in the template.
      :type feature_group_id: str

      :returns: The suggested feature group template.
      :rtype: FeatureGroupTemplate



   .. py:method:: get_dataset_schema(dataset_id)

      Retrieves the column schema of a dataset.

      :param dataset_id: Unique string identifier of the dataset schema to look up.
      :type dataset_id: str

      :returns: List of column schema definitions.
      :rtype: list[DatasetColumn]



   .. py:method:: set_dataset_database_connector_config(dataset_id, database_connector_id, object_name = None, columns = None, query_arguments = None, sql_query = None)

      Sets database connector config for a dataset. This method is currently only supported for streaming datasets.

      :param dataset_id: Unique String Identifier of the dataset_id.
      :type dataset_id: str
      :param database_connector_id: Unique String Identifier of the Database Connector to import the dataset from.
      :type database_connector_id: str
      :param object_name: If applicable, the name/ID of the object in the service to query.
      :type object_name: str
      :param columns: The columns to query from the external service object.
      :type columns: str
      :param query_arguments: Additional query arguments to filter the data.
      :type query_arguments: str
      :param sql_query: The full SQL query to use when fetching data. If present, this parameter will override `object_name`, `columns` and `query_arguments`.
      :type sql_query: str
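
      A minimal usage sketch, assuming a client instance named ``client``; the dataset ID, connector ID, and query are illustrative:

      .. code-block:: python

         client.set_dataset_database_connector_config(
             dataset_id='ds_123',
             database_connector_id='db_conn_456',
             sql_query='SELECT id, amount, created_at FROM sales WHERE created_at >= CURRENT_DATE - 30',
         )
         # When sql_query is given, object_name, columns and query_arguments are ignored.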



   .. py:method:: get_dataset_version_metrics(dataset_version, selected_columns = None, include_charts = False, include_statistics = True)

      Get metrics for a specific dataset version.

      :param dataset_version: A unique string identifier associated with the dataset version.
      :type dataset_version: str
      :param selected_columns: A list of columns to order first.
      :type selected_columns: List
      :param include_charts: A flag indicating whether charts should be included in the response. Default is false.
      :type include_charts: bool
      :param include_statistics: A flag indicating whether statistics should be included in the response. Default is true.
      :type include_statistics: bool

      :returns: The metrics for the specified Dataset version.
      :rtype: DataMetrics



   .. py:method:: update_dataset_version_limit(dataset_id, version_limit)

      Updates the version limit for the specified dataset.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str
      :param version_limit: The maximum number of versions permitted for the dataset. Once this limit is exceeded, the oldest versions will be purged in a First-In-First-Out (FIFO) order.
      :type version_limit: int

      :returns: The updated dataset.
      :rtype: Dataset



   .. py:method:: get_file_connector_instructions(bucket, write_permission = False)

      Retrieves verification information to create a data connector to a cloud storage bucket.

      :param bucket: The fully-qualified URI of the storage bucket to verify.
      :type bucket: str
      :param write_permission: If `True`, instructions will include steps for allowing Abacus.AI to write to this service.
      :type write_permission: bool

      :returns: An object with a full description of the cloud storage bucket authentication options and bucket policy. Returns an error message if the parameters are invalid.
      :rtype: FileConnectorInstructions



   .. py:method:: list_database_connectors()

      Retrieves a list of all database connectors along with their associated attributes.

      :returns: An object containing the database connector and its attributes.
      :rtype: list[DatabaseConnector]



   .. py:method:: list_file_connectors()

      Retrieves a list of all connected services in the organization and their current verification status.

      :returns: A list of cloud storage buckets connected to the organization.
      :rtype: list[FileConnector]



   .. py:method:: list_database_connector_objects(database_connector_id, fetch_raw_data = False)

      Lists queryable objects in the database connector.

      :param database_connector_id: Unique string identifier for the database connector.
      :type database_connector_id: str
      :param fetch_raw_data: If true, return unfiltered objects.
      :type fetch_raw_data: bool



   .. py:method:: get_database_connector_object_schema(database_connector_id, object_name = None, fetch_raw_data = False)

      Get the schema of an object in a database connector.

      :param database_connector_id: Unique string identifier for the database connector.
      :type database_connector_id: str
      :param object_name: Unique identifier for the object in the external system.
      :type object_name: str
      :param fetch_raw_data: If true, return unfiltered list of columns.
      :type fetch_raw_data: bool

      :returns: The schema of the object.
      :rtype: DatabaseConnectorSchema



   .. py:method:: query_database_connector(database_connector_id, query)

      Runs a query in the specified database connector.

      :param database_connector_id: A unique string identifier for the database connector.
      :type database_connector_id: str
      :param query: The query to be run in the database connector.
      :type query: str
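
      A minimal usage sketch, assuming a client instance named ``client``; the connector ID and query are illustrative:

      .. code-block:: python

         result = client.query_database_connector(
             database_connector_id='db_conn_456',
             query='SELECT COUNT(*) AS n FROM transactions',
         )
         # The return format is not documented here; inspect `result` to see how rows are represented.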



   .. py:method:: list_application_connectors()

      Retrieves a list of all application connectors along with their associated attributes.

      :returns: A list of application connectors.
      :rtype: list[ApplicationConnector]



   .. py:method:: list_application_connector_objects(application_connector_id)

      Lists queryable objects in the application connector.

      :param application_connector_id: Unique string identifier for the application connector.
      :type application_connector_id: str



   .. py:method:: get_connector_auth(service = None, application_connector_id = None, scopes = None)

      Get the authentication details for a given connector. For user level connectors, the service is required. For org level connectors, the application_connector_id is required.

      :param service: The service name.
      :type service: ApplicationConnectorType
      :param application_connector_id: The unique ID associated with the connector.
      :type application_connector_id: str
      :param scopes: The scopes to request for the connector.
      :type scopes: List

      :returns: The application connector with the authentication details.
      :rtype: ApplicationConnector



   .. py:method:: list_streaming_connectors()

      Retrieves a list of all streaming connectors along with their corresponding attributes.

      :returns: A list of StreamingConnector objects.
      :rtype: list[StreamingConnector]



   .. py:method:: list_streaming_tokens()

      Retrieves a list of all streaming tokens.

      :returns: A list of streaming tokens and their associated attributes.
      :rtype: list[StreamingAuthToken]



   .. py:method:: get_recent_feature_group_streamed_data(feature_group_id)

      Returns recently streamed data to a streaming feature group.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str



   .. py:method:: list_uploads()

      Lists all pending uploads

      :returns: A list of ongoing uploads in the organization.
      :rtype: list[Upload]



   .. py:method:: describe_upload(upload_id)

      Retrieves the current upload status (complete or inspecting) and the list of file parts uploaded for a specified dataset upload.

      :param upload_id: The unique ID associated with the file uploaded or being uploaded in parts.
      :type upload_id: str

      :returns: Details associated with the large dataset file uploaded in parts.
      :rtype: Upload



   .. py:method:: list_datasets(limit = 100, start_after_id = None, exclude_streaming = False)

      Retrieves a list of all datasets in the organization.

      :param limit: Maximum length of the list of datasets.
      :type limit: int
      :param start_after_id: ID of the dataset after which the list starts.
      :type start_after_id: str
      :param exclude_streaming: Exclude streaming datasets from the result.
      :type exclude_streaming: bool

      :returns: List of datasets.
      :rtype: list[Dataset]



   .. py:method:: describe_dataset(dataset_id)

      Retrieves a full description of the specified dataset, with attributes such as its ID, name, source type, etc.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str

      :returns: The dataset.
      :rtype: Dataset



   .. py:method:: describe_dataset_version(dataset_version)

      Retrieves a full description of the specified dataset version, including its ID, name, source type, and other attributes.

      :param dataset_version: Unique string identifier associated with the dataset version.
      :type dataset_version: str

      :returns: The dataset version.
      :rtype: DatasetVersion



   .. py:method:: list_dataset_versions(dataset_id, limit = 100, start_after_version = None)

      Retrieves a list of all dataset versions for the specified dataset.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str
      :param limit: The maximum length of the list of all dataset versions.
      :type limit: int
      :param start_after_version: The ID of the version after which the list starts.
      :type start_after_version: str

      :returns: A list of dataset versions.
      :rtype: list[DatasetVersion]



   .. py:method:: get_dataset_version_logs(dataset_version)

      Retrieves the dataset import logs.

      :param dataset_version: The unique version ID of the dataset version.
      :type dataset_version: str

      :returns: The logs for the specified dataset version.
      :rtype: DatasetVersionLogs



   .. py:method:: get_docstore_document(doc_id)

      Return a document store document by id.

      :param doc_id: Unique Docstore string identifier for the document.
      :type doc_id: str



   .. py:method:: get_docstore_image(doc_id, max_width = None, max_height = None)

      Return a document store image by id.

      :param doc_id: A unique Docstore string identifier for the image.
      :type doc_id: str
      :param max_width: Rescales the returned image so the width is less than or equal to the given maximum width, while preserving the aspect ratio.
      :type max_width: int
      :param max_height: Rescales the returned image so the height is less than or equal to the given maximum height, while preserving the aspect ratio.
      :type max_height: int



   .. py:method:: describe_train_test_data_split_feature_group(model_id)

      Get the train and test data split for a trained model by its unique identifier. This is only supported for models with custom algorithms.

      :param model_id: The unique ID of the model. By default, the latest model version will be returned if no version is specified.
      :type model_id: str

      :returns: The feature group containing the training data and fold information.
      :rtype: FeatureGroup



   .. py:method:: describe_train_test_data_split_feature_group_version(model_version)

      Get the train and test data split for a trained model by model version. This is only supported for models with custom algorithms.

      :param model_version: The unique version ID of the model version.
      :type model_version: str

      :returns: The feature group version containing the training data and folds information.
      :rtype: FeatureGroupVersion



   .. py:method:: list_models(project_id)

      Retrieves the list of models in the specified project.

      :param project_id: Unique string identifier associated with the project.
      :type project_id: str

      :returns: A list of models.
      :rtype: list[Model]



   .. py:method:: describe_model(model_id)

      Retrieves a full description of the specified model.

      :param model_id: Unique string identifier associated with the model.
      :type model_id: str

      :returns: Description of the model.
      :rtype: Model



   .. py:method:: get_model_metrics(model_id, model_version = None, return_graphs = False, validation = False)

      Retrieves metrics for all the algorithms trained in this model version.

      If only the model's unique identifier (model_id) is specified, the latest trained version of the model (model_version) is used.


      :param model_id: Unique string identifier for the model.
      :type model_id: str
      :param model_version: Version of the model.
      :type model_version: str
      :param return_graphs: If true, will return the information used for the graphs on the model metrics page such as PR Curve per label.
      :type return_graphs: bool
      :param validation: If true, will return the validation metrics instead of the test metrics.
      :type validation: bool

      :returns: An object containing the model metrics and explanations for what each metric means.
      :rtype: ModelMetrics
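
      A minimal usage sketch, assuming a client instance named ``client``; the model ID is illustrative:

      .. code-block:: python

         metrics = client.get_model_metrics(
             model_id='model_789',
             return_graphs=False,
             validation=False,
         )
         # `metrics` is a ModelMetrics object covering every algorithm trained in the
         # latest model version; pass model_version to target a specific version.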



   .. py:method:: get_feature_group_schemas_for_model_version(model_version)

      Gets the schema (including feature mappings) for all feature groups used in the model version.

      :param model_version: Unique string identifier for the version of the model.
      :type model_version: str

      :returns: List of schema for all feature groups used in the model version.
      :rtype: list[ModelVersionFeatureGroupSchema]



   .. py:method:: list_model_versions(model_id, limit = 100, start_after_version = None)

      Retrieves a list of versions for a given model.

      :param model_id: Unique string identifier associated with the model.
      :type model_id: str
      :param limit: Maximum length of the list of all dataset versions.
      :type limit: int
      :param start_after_version: Unique string identifier of the version after which the list starts.
      :type start_after_version: str

      :returns: An array of model versions.
      :rtype: list[ModelVersion]



   .. py:method:: describe_model_version(model_version)

      Retrieves a full description of the specified model version.

      :param model_version: Unique string identifier of the model version.
      :type model_version: str

      :returns: A model version.
      :rtype: ModelVersion



   .. py:method:: get_feature_importance_by_model_version(model_version)

      Gets the feature importance calculated by various methods for the model.

      :param model_version: Unique string identifier for the model version.
      :type model_version: str

      :returns: Feature importances for the model.
      :rtype: FeatureImportance



   .. py:method:: get_training_data_logs(model_version)

      Retrieves the data preparation logs during model training.

      :param model_version: The unique version ID of the model version.
      :type model_version: str

      :returns: A list of logs.
      :rtype: list[DataPrepLogs]



   .. py:method:: get_training_logs(model_version, stdout = False, stderr = False)

      Returns training logs for the model.

      :param model_version: The unique version ID of the model version.
      :type model_version: str
      :param stdout: Set True to get info logs.
      :type stdout: bool
      :param stderr: Set True to get error logs.
      :type stderr: bool

      :returns: A function logs object.
      :rtype: FunctionLogs



   .. py:method:: describe_model_artifacts_export(model_artifacts_export_id)

      Get the description and status of the model artifacts export.

      :param model_artifacts_export_id: A unique string identifier for the export.
      :type model_artifacts_export_id: str

      :returns: Object describing the export and its status.
      :rtype: ModelArtifactsExport



   .. py:method:: list_model_artifacts_exports(model_id, limit = 25)

      List all the model artifacts exports.

      :param model_id: A unique string identifier for the model.
      :type model_id: str
      :param limit: Maximum length of the list of all exports.
      :type limit: int

      :returns: List of model artifacts exports.
      :rtype: list[ModelArtifactsExport]



   .. py:method:: list_model_monitors(project_id, limit = None)

      Retrieves the list of model monitors in the specified project.

      :param project_id: Unique string identifier associated with the project.
      :type project_id: str
      :param limit: Maximum number of model monitors to return. An internal default limit applies if not set.
      :type limit: int

      :returns: A list of model monitors.
      :rtype: list[ModelMonitor]



   .. py:method:: describe_model_monitor(model_monitor_id)

      Retrieves a full description of the specified model monitor.

      :param model_monitor_id: Unique string identifier associated with the model monitor.
      :type model_monitor_id: str

      :returns: Description of the model monitor.
      :rtype: ModelMonitor



   .. py:method:: get_prediction_drift(model_monitor_version)

      Gets the label and prediction drifts for a model monitor.

      :param model_monitor_version: Unique string identifier for a model monitor version created under the project.
      :type model_monitor_version: str

      :returns: Object describing training and prediction output label and prediction distributions.
      :rtype: DriftDistributions



   .. py:method:: get_model_monitor_summary(model_monitor_id)

      Gets the summary of a model monitor across versions.

      :param model_monitor_id: A unique string identifier associated with the model monitor.
      :type model_monitor_id: str

      :returns: An object describing integrity, bias violations, model accuracy and drift for the model monitor.
      :rtype: ModelMonitorSummary



   .. py:method:: list_model_monitor_versions(model_monitor_id, limit = 100, start_after_version = None)

      Retrieves a list of versions for a given model monitor.

      :param model_monitor_id: The unique ID associated with the model monitor.
      :type model_monitor_id: str
      :param limit: The maximum length of the list of all model monitor versions.
      :type limit: int
      :param start_after_version: The ID of the version after which the list starts.
      :type start_after_version: str

      :returns: A list of model monitor versions.
      :rtype: list[ModelMonitorVersion]



   .. py:method:: describe_model_monitor_version(model_monitor_version)

      Retrieves a full description of the specified model monitor version.

      :param model_monitor_version: The unique version ID of the model monitor version.
      :type model_monitor_version: str

      :returns: A model monitor version.
      :rtype: ModelMonitorVersion



   .. py:method:: model_monitor_version_metric_data(model_monitor_version, metric_type, actual_values_to_detail = None)

      Provides the data needed for decile metrics associated with the model monitor.

      :param model_monitor_version: Unique string identifier for the model monitor version.
      :type model_monitor_version: str
      :param metric_type: The type of metric to get data for.
      :type metric_type: str
      :param actual_values_to_detail: The actual values to detail.
      :type actual_values_to_detail: list

      :returns: Data associated with the metric.
      :rtype: ModelMonitorVersionMetricData



   .. py:method:: list_organization_model_monitors(only_starred = False)

      Gets a list of Model Monitors for an organization.

      :param only_starred: Whether to return only starred Model Monitors. Defaults to False.
      :type only_starred: bool

      :returns: A list of Model Monitors.
      :rtype: list[ModelMonitor]



   .. py:method:: get_model_monitor_chart_from_organization(chart_type, limit = 15)

      Gets a list of model monitor summaries across monitors for an organization.

      :param chart_type: Type of chart (model_accuracy, bias_violations, data_integrity, or model_drift) to return.
      :type chart_type: str
      :param limit: Maximum length of the model monitors.
      :type limit: int

      :returns: List of objects describing accuracy, bias, drift, or integrity for all model monitors in an organization.
      :rtype: list[ModelMonitorSummaryFromOrg]
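
      For instance, to pull the drift chart data across an organization's monitors (a sketch assuming an authenticated ``client``):

      .. code-block:: python

         summaries = client.get_model_monitor_chart_from_organization('model_drift', limit=15)
         for summary in summaries:
             print(summary)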



   .. py:method:: get_model_monitor_summary_from_organization()

      Gets a consolidated summary of model monitors for an organization.

      :returns: A list of objects describing accuracy, bias, drift, and integrity for all model monitors in an organization.
      :rtype: list[ModelMonitorOrgSummary]



   .. py:method:: list_eda(project_id)

      Retrieves the list of Exploratory Data Analysis (EDA) in the specified project.

      :param project_id: Unique string identifier associated with the project.
      :type project_id: str

      :returns: List of EDA objects.
      :rtype: list[Eda]



   .. py:method:: describe_eda(eda_id)

      Retrieves a full description of the specified EDA object.

      :param eda_id: Unique string identifier associated with the EDA object.
      :type eda_id: str

      :returns: Description of the EDA object.
      :rtype: Eda



   .. py:method:: list_eda_versions(eda_id, limit = 100, start_after_version = None)

      Retrieves a list of versions for a given EDA object.

      :param eda_id: The unique ID associated with the EDA object.
      :type eda_id: str
      :param limit: The maximum length of the list of all EDA versions.
      :type limit: int
      :param start_after_version: The ID of the version after which the list starts.
      :type start_after_version: str

      :returns: A list of EDA versions.
      :rtype: list[EdaVersion]



   .. py:method:: describe_eda_version(eda_version)

      Retrieves a full description of the specified EDA version.

      :param eda_version: Unique string identifier of the EDA version.
      :type eda_version: str

      :returns: An EDA version.
      :rtype: EdaVersion



   .. py:method:: get_eda_collinearity(eda_version)

      Gets the Collinearity between all features for the Exploratory Data Analysis.

      :param eda_version: Unique string identifier associated with the EDA instance.
      :type eda_version: str

      :returns: An object with a record of correlations between each feature for the EDA.
      :rtype: EdaCollinearity



   .. py:method:: get_eda_data_consistency(eda_version, transformation_feature = None)

      Gets the data consistency for the Exploratory Data Analysis.

      :param eda_version: Unique string identifier associated with the EDA instance.
      :type eda_version: str
      :param transformation_feature: The transformation feature to get consistency for.
      :type transformation_feature: str

      :returns: Object with duplication, deletion, and transformation data for data consistency analysis for an EDA.
      :rtype: EdaDataConsistency



   .. py:method:: get_collinearity_for_feature(eda_version, feature_name = None)

      Gets the Collinearity for the given feature from the Exploratory Data Analysis.

      :param eda_version: Unique string identifier associated with the EDA instance.
      :type eda_version: str
      :param feature_name: Name of the feature for which correlation is shown.
      :type feature_name: str

      :returns: Object with a record of correlations for the provided feature for an EDA.
      :rtype: EdaFeatureCollinearity



   .. py:method:: get_feature_association(eda_version, reference_feature_name, test_feature_name)

      Gets the Feature Association for the given features from the feature group version within the eda_version.

      :param eda_version: Unique string identifier associated with the EDA instance.
      :type eda_version: str
      :param reference_feature_name: Name of the feature for feature association (on x-axis for the plots generated for the Feature association in the product).
      :type reference_feature_name: str
      :param test_feature_name: Name of the feature for feature association (on y-axis for the plots generated for the Feature association in the product).
      :type test_feature_name: str

      :returns: An object with a record of data for the feature association between the two given features for an EDA version.
      :rtype: EdaFeatureAssociation



   .. py:method:: get_eda_forecasting_analysis(eda_version)

      Gets the Forecasting analysis for the Exploratory Data Analysis.

      :param eda_version: Unique string identifier associated with the EDA version.
      :type eda_version: str

      :returns: Object with forecasting analysis, including sales_across_time, cummulative_contribution, missing_value_distribution, history_length, num_rows_histogram, and product_maturity data.
      :rtype: EdaForecastingAnalysis



   .. py:method:: list_holdout_analysis(project_id, model_id = None)

      List holdout analyses for a project. Optionally, filter by model.

      :param project_id: ID of the project to list holdout analyses for
      :type project_id: str
      :param model_id: (optional) ID of the model to filter by
      :type model_id: str

      :returns: The holdout analyses
      :rtype: list[HoldoutAnalysis]



   .. py:method:: describe_holdout_analysis(holdout_analysis_id)

      Get a holdout analysis.

      :param holdout_analysis_id: ID of the holdout analysis to get
      :type holdout_analysis_id: str

      :returns: The holdout analysis
      :rtype: HoldoutAnalysis



   .. py:method:: list_holdout_analysis_versions(holdout_analysis_id)

      List holdout analysis versions for a holdout analysis.

      :param holdout_analysis_id: ID of the holdout analysis to list holdout analysis versions for
      :type holdout_analysis_id: str

      :returns: The holdout analysis versions
      :rtype: list[HoldoutAnalysisVersion]



   .. py:method:: describe_holdout_analysis_version(holdout_analysis_version, get_metrics = False)

      Get a holdout analysis version.

      :param holdout_analysis_version: ID of the holdout analysis version to get
      :type holdout_analysis_version: str
      :param get_metrics: (optional) Whether to get the metrics for the holdout analysis version
      :type get_metrics: bool

      :returns: The holdout analysis version
      :rtype: HoldoutAnalysisVersion
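
      A short sketch that lists the versions of a holdout analysis and fetches metrics for the first returned version (IDs are placeholders; the attribute name is an assumption):

      .. code-block:: python

         versions = client.list_holdout_analysis_versions('holdout_analysis_id')
         if versions:
             latest = client.describe_holdout_analysis_version(
                 versions[0].holdout_analysis_version,  # assumed attribute name
                 get_metrics=True)
             print(latest)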



   .. py:method:: describe_monitor_alert(monitor_alert_id)

      Describes a given monitor alert ID.

      :param monitor_alert_id: Unique identifier of the monitor alert.
      :type monitor_alert_id: str

      :returns: Object containing information about the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: describe_monitor_alert_version(monitor_alert_version)

      Describes a given monitor alert version ID.

      :param monitor_alert_version: Unique string identifier for the monitor alert.
      :type monitor_alert_version: str

      :returns: An object describing the monitor alert version.
      :rtype: MonitorAlertVersion



   .. py:method:: list_monitor_alerts_for_monitor(model_monitor_id = None, realtime_monitor_id = None)

      Retrieves the list of monitor alerts for a specified monitor. Exactly one of model_monitor_id or realtime_monitor_id must be provided.

      :param model_monitor_id: The unique ID associated with the model monitor.
      :type model_monitor_id: str
      :param realtime_monitor_id: The unique ID associated with the real-time monitor.
      :type realtime_monitor_id: str

      :returns: A list of monitor alerts.
      :rtype: list[MonitorAlert]
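
      Pass exactly one of the two IDs, for example (a sketch; the ID is a placeholder):

      .. code-block:: python

         alerts = client.list_monitor_alerts_for_monitor(model_monitor_id='model_monitor_id')
         for alert in alerts:
             print(alert)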



   .. py:method:: list_monitor_alert_versions_for_monitor_version(model_monitor_version)

      Retrieves the list of monitor alert versions for a specified monitor instance.

      :param model_monitor_version: The unique ID associated with the model monitor version.
      :type model_monitor_version: str

      :returns: A list of monitor alert versions.
      :rtype: list[MonitorAlertVersion]



   .. py:method:: get_drift_for_feature(model_monitor_version, feature_name, nested_feature_name = None)

      Gets the feature drift associated with a single feature in an output feature group from a prediction.

      :param model_monitor_version: Unique string identifier of a model monitor version created under the project.
      :type model_monitor_version: str
      :param feature_name: Name of the feature to view the distribution of.
      :type feature_name: str
      :param nested_feature_name: Optionally, the name of the nested feature that the feature is in.
      :type nested_feature_name: str

      :returns: An object describing the training and prediction output feature distributions.
      :rtype: FeatureDistribution
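
      For example, to inspect drift for a single top-level feature (a sketch; the identifiers are placeholders):

      .. code-block:: python

         distribution = client.get_drift_for_feature(
             'model_monitor_version', feature_name='price')
         print(distribution)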



   .. py:method:: get_outliers_for_feature(model_monitor_version, feature_name = None, nested_feature_name = None)

      Gets a list of outliers measured by a single feature (or overall) in an output feature group from a prediction.

      :param model_monitor_version: Unique string identifier for a model monitor version created under the project.
      :type model_monitor_version: str
      :param feature_name: Name of the feature to view the distribution of.
      :type feature_name: str
      :param nested_feature_name: Optionally, the name of the nested feature that the feature is in.
      :type nested_feature_name: str



   .. py:method:: describe_prediction_operator(prediction_operator_id)

      Describe an existing prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str

      :returns: The requested prediction operator object.
      :rtype: PredictionOperator



   .. py:method:: list_prediction_operators(project_id)

      List all the prediction operators inside a project.

      :param project_id: The unique ID of the project.
      :type project_id: str

      :returns: A list of prediction operator objects.
      :rtype: list[PredictionOperator]



   .. py:method:: list_prediction_operator_versions(prediction_operator_id)

      List all the prediction operator versions for a prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str

      :returns: A list of prediction operator version objects.
      :rtype: list[PredictionOperatorVersion]



   .. py:method:: describe_deployment(deployment_id)

      Retrieves a full description of the specified deployment.

      :param deployment_id: Unique string identifier associated with the deployment.
      :type deployment_id: str

      :returns: Description of the deployment.
      :rtype: Deployment



   .. py:method:: list_deployments(project_id)

      Retrieves a list of all deployments in the specified project.

      :param project_id: The unique identifier associated with the project.
      :type project_id: str

      :returns: An array of deployments.
      :rtype: list[Deployment]



   .. py:method:: list_deployment_tokens(project_id)

      Retrieves a list of all deployment tokens associated with the specified project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str

      :returns: A list of deployment tokens.
      :rtype: list[DeploymentAuthToken]



   .. py:method:: get_api_endpoint(deployment_token = None, deployment_id = None, streaming_token = None, feature_group_id = None, model_id = None)

      Returns the API endpoint specific to an organization. This function can be called using either an API key or a deployment ID and deployment token for authentication.

      :param deployment_token: Token used for authenticating access to deployed models.
      :type deployment_token: str
      :param deployment_id: Unique identifier assigned to a deployment created under the specified project.
      :type deployment_id: str
      :param streaming_token: Token used for authenticating access to streaming data.
      :type streaming_token: str
      :param feature_group_id: Unique identifier assigned to a feature group.
      :type feature_group_id: str
      :param model_id: Unique identifier assigned to a model.
      :type model_id: str

      :returns: The API endpoint specific to the organization.
      :rtype: ApiEndpoint
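
      A minimal sketch using a deployment ID and deployment token for authentication (both values are placeholders):

      .. code-block:: python

         endpoint = client.get_api_endpoint(
             deployment_token='deployment_token',
             deployment_id='deployment_id')
         print(endpoint)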



   .. py:method:: get_model_training_types_for_deployment(model_id, model_version = None, algorithm = None)

      Returns types of models that can be deployed for a given model instance ID.

      :param model_id: The unique ID associated with the model.
      :type model_id: str
      :param model_version: The unique ID associated with the model version to deploy.
      :type model_version: str
      :param algorithm: The unique ID associated with the algorithm to deploy.
      :type algorithm: str

      :returns: Model training types for deployment.
      :rtype: ModelTrainingTypeForDeployment



   .. py:method:: get_prediction_logs_records(deployment_id, limit = 10, last_log_request_id = '', last_log_timestamp = None)

      Retrieves the prediction request IDs for the most recent predictions made to the deployment.

      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param limit: The number of prediction log entries to retrieve up to the specified limit.
      :type limit: int
      :param last_log_request_id: The request ID of the last log entry to retrieve.
      :type last_log_request_id: str
      :param last_log_timestamp: A Unix timestamp in milliseconds specifying the timestamp for the last log entry.
      :type last_log_timestamp: int

      :returns: A list of prediction log records.
      :rtype: list[PredictionLogRecord]
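
      For example, to fetch the ten most recent prediction log records for a deployment (a sketch; the deployment ID is a placeholder):

      .. code-block:: python

         records = client.get_prediction_logs_records('deployment_id', limit=10)
         for record in records:
             print(record)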



   .. py:method:: list_deployment_alerts(deployment_id)

      List the monitor alerts associated with the deployment id.

      :param deployment_id: Unique string identifier for the deployment.
      :type deployment_id: str

      :returns: An array of deployment alerts.
      :rtype: list[MonitorAlert]



   .. py:method:: list_realtime_monitors(project_id)

      List the real-time monitors associated with the project.

      :param project_id: Unique string identifier for the project.
      :type project_id: str

      :returns: An array of real-time monitors.
      :rtype: list[RealtimeMonitor]



   .. py:method:: describe_realtime_monitor(realtime_monitor_id)

      Get the real-time monitor associated with the real-time monitor id.

      :param realtime_monitor_id: Unique string identifier for the real-time monitor.
      :type realtime_monitor_id: str

      :returns: Object describing the real-time monitor.
      :rtype: RealtimeMonitor



   .. py:method:: describe_refresh_policy(refresh_policy_id)

      Retrieve a single refresh policy

      :param refresh_policy_id: The unique ID associated with this refresh policy.
      :type refresh_policy_id: str

      :returns: An object representing the refresh policy.
      :rtype: RefreshPolicy



   .. py:method:: describe_refresh_pipeline_run(refresh_pipeline_run_id)

      Retrieve a single refresh pipeline run

      :param refresh_pipeline_run_id: Unique string identifier associated with the refresh pipeline run.
      :type refresh_pipeline_run_id: str

      :returns: A refresh pipeline run object.
      :rtype: RefreshPipelineRun



   .. py:method:: list_refresh_policies(project_id = None, dataset_ids = [], feature_group_id = None, model_ids = [], deployment_ids = [], batch_prediction_ids = [], model_monitor_ids = [], notebook_ids = [])

      List the refresh policies for the organization. If no filters are specified, all refresh policies are returned.

      :param project_id: Project ID for which we wish to see the refresh policies attached.
      :type project_id: str
      :param dataset_ids: Comma-separated list of Dataset IDs.
      :type dataset_ids: List
      :param feature_group_id: Feature Group ID for which we wish to see the refresh policies attached.
      :type feature_group_id: str
      :param model_ids: Comma-separated list of Model IDs.
      :type model_ids: List
      :param deployment_ids: Comma-separated list of Deployment IDs.
      :type deployment_ids: List
      :param batch_prediction_ids: Comma-separated list of Batch Prediction IDs.
      :type batch_prediction_ids: List
      :param model_monitor_ids: Comma-separated list of Model Monitor IDs.
      :type model_monitor_ids: List
      :param notebook_ids: Comma-separated list of Notebook IDs.
      :type notebook_ids: List

      :returns: List of all refresh policies in the organization.
      :rtype: list[RefreshPolicy]
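
      For example, to list only the refresh policies attached to a project and a couple of datasets (a sketch; the IDs are placeholders):

      .. code-block:: python

         policies = client.list_refresh_policies(
             project_id='project_id',
             dataset_ids=['dataset_id_1', 'dataset_id_2'])
         for policy in policies:
             print(policy)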



   .. py:method:: list_refresh_pipeline_runs(refresh_policy_id)

      List the times that the refresh policy has been run

      :param refresh_policy_id: Unique identifier associated with the refresh policy.
      :type refresh_policy_id: str

      :returns: List of refresh pipeline runs for the given refresh policy ID.
      :rtype: list[RefreshPipelineRun]



   .. py:method:: download_batch_prediction_result_chunk(batch_prediction_version, offset = 0, chunk_size = 10485760)

      Returns a stream containing the batch prediction results.

      :param batch_prediction_version: Unique string identifier of the batch prediction version to get the results from.
      :type batch_prediction_version: str
      :param offset: The offset to read from.
      :type offset: int
      :param chunk_size: The maximum amount of data to read.
      :type chunk_size: int
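
      A sketch of downloading the full result by reading consecutive chunks, assuming the returned stream exposes a file-like ``read()`` method (the version ID is a placeholder):

      .. code-block:: python

         offset, chunk_size = 0, 10485760
         with open('results.csv', 'wb') as out:
             while True:
                 chunk = client.download_batch_prediction_result_chunk(
                     'batch_prediction_version', offset=offset, chunk_size=chunk_size)
                 data = chunk.read()  # assumes a file-like stream
                 if not data:
                     break
                 out.write(data)
                 offset += len(data)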



   .. py:method:: get_batch_prediction_connector_errors(batch_prediction_version)

      Returns a stream containing the batch prediction database connection write errors, if any writes failed for the specified batch prediction job.

      :param batch_prediction_version: Unique string identifier of the batch prediction job to get the errors for.
      :type batch_prediction_version: str



   .. py:method:: list_batch_predictions(project_id, limit = None)

      Retrieves a list of batch predictions in the project.

      :param project_id: Unique string identifier of the project.
      :type project_id: str
      :param limit: Maximum number of batch predictions to return. An internal default limit applies if this is not set.
      :type limit: int

      :returns: List of batch prediction jobs.
      :rtype: list[BatchPrediction]



   .. py:method:: describe_batch_prediction(batch_prediction_id)

      Describe the batch prediction.

      :param batch_prediction_id: The unique identifier associated with the batch prediction.
      :type batch_prediction_id: str

      :returns: The batch prediction description.
      :rtype: BatchPrediction



   .. py:method:: list_batch_prediction_versions(batch_prediction_id, limit = 100, start_after_version = None)

      Retrieves a list of versions of a given batch prediction

      :param batch_prediction_id: Unique identifier of the batch prediction.
      :type batch_prediction_id: str
      :param limit: Number of versions to list.
      :type limit: int
      :param start_after_version: Version to start after.
      :type start_after_version: str

      :returns: List of batch prediction versions.
      :rtype: list[BatchPredictionVersion]



   .. py:method:: describe_batch_prediction_version(batch_prediction_version)

      Describes a Batch Prediction Version.

      :param batch_prediction_version: Unique string identifier of the Batch Prediction Version.
      :type batch_prediction_version: str

      :returns: The Batch Prediction Version.
      :rtype: BatchPredictionVersion



   .. py:method:: get_batch_prediction_version_logs(batch_prediction_version)

      Retrieves the batch prediction logs.

      :param batch_prediction_version: The unique version ID of the batch prediction version.
      :type batch_prediction_version: str

      :returns: The logs for the specified batch prediction version.
      :rtype: BatchPredictionVersionLogs



   .. py:method:: get_deployment_statistics_over_time(deployment_id, start_date, end_date)

      Return basic access statistics for the given window

      :param deployment_id: Unique string identifier of the deployment created under the project.
      :type deployment_id: str
      :param start_date: Timeline start date in ISO format.
      :type start_date: str
      :param end_date: Timeline end date in ISO format. The date range must be 7 days or less.
      :type end_date: str

      :returns: Object describing time series data of the number of requests and latency over the specified time period.
      :rtype: DeploymentStatistics
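
      For example, to get request statistics for a one-week window (a sketch; the deployment ID is a placeholder and the dates are ISO-formatted strings):

      .. code-block:: python

         stats = client.get_deployment_statistics_over_time(
             'deployment_id',
             start_date='2024-01-01T00:00:00Z',
             end_date='2024-01-07T00:00:00Z')
         print(stats)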



   .. py:method:: get_data(feature_group_id, primary_key = None, num_rows = None)

      Gets the feature group rows for online updatable feature groups.

      If primary_key is set, the row corresponding to that primary key is returned.
      If num_rows is set, at most num_rows of the most recently updated rows are returned.


      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param primary_key: The primary key value for which to retrieve the feature group row (only for online feature groups).
      :type primary_key: str
      :param num_rows: Maximum number of rows to return from the feature group
      :type num_rows: int

      :returns: A list of feature group rows.
      :rtype: list[FeatureGroupRow]
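
      Two example calls, one by primary key and one for the latest rows (a sketch; the IDs and key values are placeholders):

      .. code-block:: python

         # Single row looked up by its primary key value
         row = client.get_data('feature_group_id', primary_key='user_123')

         # Up to 50 of the most recently updated rows
         rows = client.get_data('feature_group_id', num_rows=50)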



   .. py:method:: describe_python_function(name)

      Describe a Python Function.

      :param name: The name to identify the Python function. Must be a valid Python identifier.
      :type name: str

      :returns: The Python function object.
      :rtype: PythonFunction



   .. py:method:: list_python_functions(function_type = 'FEATURE_GROUP')

      List all python functions within the organization.

      :param function_type: Optional argument to specify the type of function to list Python functions for. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
      :type function_type: str

      :returns: A list of PythonFunction objects.
      :rtype: list[PythonFunction]



   .. py:method:: list_pipelines(project_id = None)

      Lists the pipelines for an organization or a project

      :param project_id: Unique string identifier for the project to list pipelines from.
      :type project_id: str

      :returns: A list of pipelines.
      :rtype: list[Pipeline]



   .. py:method:: describe_pipeline_version(pipeline_version)

      Describes a specified pipeline version

      :param pipeline_version: Unique string identifier for the pipeline version
      :type pipeline_version: str

      :returns: Object describing the pipeline version
      :rtype: PipelineVersion



   .. py:method:: describe_pipeline_step(pipeline_step_id)

      Describes a pipeline step.

      :param pipeline_step_id: The ID of the pipeline step.
      :type pipeline_step_id: str

      :returns: An object describing the pipeline step.
      :rtype: PipelineStep



   .. py:method:: describe_pipeline_step_by_name(pipeline_id, step_name)

      Describes a pipeline step by the step name.

      :param pipeline_id: The ID of the pipeline.
      :type pipeline_id: str
      :param step_name: The name of the step.
      :type step_name: str

      :returns: An object describing the pipeline step.
      :rtype: PipelineStep



   .. py:method:: describe_pipeline_step_version(pipeline_step_version)

      Describes a pipeline step version.

      :param pipeline_step_version: The ID of the pipeline step version.
      :type pipeline_step_version: str

      :returns: An object describing the pipeline step version.
      :rtype: PipelineStepVersion



   .. py:method:: list_pipeline_version_logs(pipeline_version)

      Gets the logs for the steps in a given pipeline version.

      :param pipeline_version: The ID of the pipeline version.
      :type pipeline_version: str

      :returns: Object describing the logs for the steps in the pipeline.
      :rtype: PipelineVersionLogs



   .. py:method:: get_step_version_logs(pipeline_step_version)

      Gets the logs for a given step version.

      :param pipeline_step_version: The ID of the pipeline step version.
      :type pipeline_step_version: str

      :returns: Object describing the pipeline step logs.
      :rtype: PipelineStepVersionLogs



   .. py:method:: describe_graph_dashboard(graph_dashboard_id)

      Describes a given graph dashboard.

      :param graph_dashboard_id: Unique identifier for the graph dashboard.
      :type graph_dashboard_id: str

      :returns: An object containing information about the graph dashboard.
      :rtype: GraphDashboard



   .. py:method:: list_graph_dashboards(project_id = None)

      Lists the graph dashboards for a project

      :param project_id: Unique string identifier for the project to list graph dashboards from.
      :type project_id: str

      :returns: A list of graph dashboards.
      :rtype: list[GraphDashboard]



   .. py:method:: describe_graph_for_dashboard(graph_reference_id)

      Describes a Python plot attached to a graph dashboard.

      :param graph_reference_id: Unique string identifier of the Python function for the graph.
      :type graph_reference_id: str

      :returns: An object describing the graph dashboard.
      :rtype: PythonPlotFunction



   .. py:method:: describe_algorithm(algorithm)

      Retrieves a full description of the specified algorithm.

      :param algorithm: The name of the algorithm.
      :type algorithm: str

      :returns: The description of the algorithm.
      :rtype: Algorithm



   .. py:method:: list_algorithms(problem_type = None, project_id = None)

      List all custom algorithms, with optional filtering on Problem Type and Project ID

      :param problem_type: The problem type to query. If `None`, return all algorithms in the organization.
      :type problem_type: ProblemType
      :param project_id: The ID of the project.
      :type project_id: str

      :returns: A list of algorithms.
      :rtype: list[Algorithm]



   .. py:method:: describe_custom_loss_function(name)

      Retrieve a full description of a previously registered custom loss function.

      :param name: Registered name of the custom loss function.
      :type name: str

      :returns: The description of the custom loss function with the given name.
      :rtype: CustomLossFunction



   .. py:method:: list_custom_loss_functions(name_prefix = None, loss_function_type = None)

      Retrieves a list of registered custom loss functions and their descriptions.

      :param name_prefix: The prefix of the names of the loss functions to list.
      :type name_prefix: str
      :param loss_function_type: The category of loss functions to search in.
      :type loss_function_type: str

      :returns: A list of custom loss functions matching the given filters.
      :rtype: list[CustomLossFunction]



   .. py:method:: describe_custom_metric(name)

      Retrieves a full description of a previously registered custom metric function.

      :param name: Registered name of the custom metric.
      :type name: str

      :returns: The description of the custom metric with the given name.
      :rtype: CustomMetric



   .. py:method:: describe_custom_metric_version(custom_metric_version)

      Describes a given custom metric version

      :param custom_metric_version: A unique string identifier for the custom metric version.
      :type custom_metric_version: str

      :returns: An object describing the custom metric version.
      :rtype: CustomMetricVersion



   .. py:method:: list_custom_metrics(name_prefix = None, problem_type = None)

      Retrieves a list of registered custom metrics.

      :param name_prefix: The prefix of the names of the custom metrics.
      :type name_prefix: str
      :param problem_type: The associated problem type of the custom metrics.
      :type problem_type: str

      :returns: A list of custom metrics.
      :rtype: list[CustomMetric]



   .. py:method:: describe_module(name)

      Retrieves a full description of the specified module.

      :param name: The name of the module.
      :type name: str

      :returns: The description of the module.
      :rtype: Module



   .. py:method:: list_modules()

      List all the modules

      :returns: A list of modules
      :rtype: list[Module]



   .. py:method:: get_organization_secret(secret_key)

      Gets a secret.

      :param secret_key: The secret key.
      :type secret_key: str

      :returns: The secret.
      :rtype: OrganizationSecret



   .. py:method:: list_organization_secrets()

      Lists all secrets for an organization.

      :returns: list of secrets belonging to the organization.
      :rtype: list[OrganizationSecret]



   .. py:method:: get_app_user_group_sign_in_token(user_group_id, email, name)

      Get a token for a user group user to sign in.

      :param user_group_id: The ID of the user group.
      :type user_group_id: str
      :param email: The email of the user.
      :type email: str
      :param name: The name of the user.
      :type name: str

      :returns: The token to sign in the user
      :rtype: AppUserGroupSignInToken



   .. py:method:: query_feature_group_code_generator(query, language, project_id = None)

      Send a query to the feature group code generator tool to generate code for the query.

      :param query: A natural language query which specifies what the user wants out of the feature group or its code.
      :type query: str
      :param language: The language in which code is to be generated. One of 'sql' or 'python'.
      :type language: str
      :param project_id: A unique string identifier of the project in context of which the query is.
      :type project_id: str

      :returns: The response from the model, raw text and parsed components.
      :rtype: LlmResponse
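
      For example, to ask for SQL that joins two feature groups (a sketch; the project ID is a placeholder):

      .. code-block:: python

         response = client.query_feature_group_code_generator(
             query='Join the users table to the orders table on user_id',
             language='sql',
             project_id='project_id')
         print(response)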



   .. py:method:: get_natural_language_explanation(feature_group_id = None, feature_group_version = None, model_id = None)

      Returns the saved natural language explanation of an artifact with the given ID. The artifact can be a Feature Group, Feature Group Version, or Model.

      :param feature_group_id: A unique string identifier associated with the Feature Group.
      :type feature_group_id: str
      :param feature_group_version: A unique string identifier associated with the Feature Group Version.
      :type feature_group_version: str
      :param model_id: A unique string identifier associated with the Model.
      :type model_id: str

      :returns: The object containing natural language explanation(s) as field(s).
      :rtype: NaturalLanguageExplanation



   .. py:method:: generate_natural_language_explanation(feature_group_id = None, feature_group_version = None, model_id = None)

      Generates a natural language explanation of an artifact with the given ID. The artifact can be a Feature Group, Feature Group Version, or Model.

      :param feature_group_id: A unique string identifier associated with the Feature Group.
      :type feature_group_id: str
      :param feature_group_version: A unique string identifier associated with the Feature Group Version.
      :type feature_group_version: str
      :param model_id: A unique string identifier associated with the Model.
      :type model_id: str

      :returns: The object containing natural language explanation(s) as field(s).
      :rtype: NaturalLanguageExplanation



   .. py:method:: get_chat_session(chat_session_id)

      Gets a chat session from Data Science Co-pilot.

      :param chat_session_id: Unique ID of the chat session.
      :type chat_session_id: str

      :returns: The chat session with Data Science Co-pilot
      :rtype: ChatSession



   .. py:method:: list_chat_sessions(most_recent_per_project = False)

      Lists all chat sessions for the current user

      :param most_recent_per_project: Whether to return only the most recent chat session per project. Defaults to False.
      :type most_recent_per_project: bool

      :returns: The chat sessions with Data Science Co-pilot.
      :rtype: list[ChatSession]



   .. py:method:: get_deployment_conversation(deployment_conversation_id = None, external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False)

      Gets a deployment conversation.

      :param deployment_conversation_id: Unique ID of the conversation. One of deployment_conversation_id or external_session_id must be provided.
      :type deployment_conversation_id: str
      :param external_session_id: External session ID of the conversation.
      :type external_session_id: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str
      :param filter_intermediate_conversation_events: If true, intermediate conversation events will be filtered out. Default is true.
      :type filter_intermediate_conversation_events: bool
      :param get_unused_document_uploads: If true, unused document uploads will be returned. Default is false.
      :type get_unused_document_uploads: bool

      :returns: The deployment conversation.
      :rtype: DeploymentConversation



   .. py:method:: list_deployment_conversations(deployment_id = None, external_application_id = None, conversation_type = None, fetch_last_llm_info = False, limit = None, search = None)

      Lists all conversations for the given deployment and current user.

      :param deployment_id: The deployment to get conversations for.
      :type deployment_id: str
      :param external_application_id: The external application id associated with the deployment conversation. If specified, only conversations created on that application will be listed.
      :type external_application_id: str
      :param conversation_type: The type of the conversation indicating its origin.
      :type conversation_type: DeploymentConversationType
      :param fetch_last_llm_info: If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
      :type fetch_last_llm_info: bool
      :param limit: The number of conversations to return. Defaults to 600.
      :type limit: int
      :param search: The search query to filter conversations by title.
      :type search: str

      :returns: The deployment conversations.
      :rtype: list[DeploymentConversation]
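
      For example, to list recent conversations for one deployment, filtered by a title search (a sketch; the ID is a placeholder):

      .. code-block:: python

         conversations = client.list_deployment_conversations(
             deployment_id='deployment_id', limit=25, search='pricing')
         for conversation in conversations:
             print(conversation)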



   .. py:method:: export_deployment_conversation(deployment_conversation_id = None, external_session_id = None)

      Export a Deployment Conversation.

      :param deployment_conversation_id: A unique string identifier associated with the deployment conversation.
      :type deployment_conversation_id: str
      :param external_session_id: The external session id associated with the deployment conversation. One of deployment_conversation_id or external_session_id must be provided.
      :type external_session_id: str

      :returns: The deployment conversation html export.
      :rtype: DeploymentConversationExport



   .. py:method:: get_app_user_group(user_group_id)

      Gets an App User Group.

      :param user_group_id: The ID of the App User Group.
      :type user_group_id: str

      :returns: The App User Group.
      :rtype: AppUserGroup



   .. py:method:: describe_external_application(external_application_id)

      Describes an External Application.

      :param external_application_id: The ID of the External Application.
      :type external_application_id: str

      :returns: The External Application.
      :rtype: ExternalApplication



   .. py:method:: list_external_applications()

      Lists External Applications in an organization.

      :returns: List of External Applications.
      :rtype: list[ExternalApplication]



   .. py:method:: download_agent_attachment(deployment_id, attachment_id)

      Return an agent attachment.

      :param deployment_id: The deployment ID.
      :type deployment_id: str
      :param attachment_id: The attachment ID.
      :type attachment_id: str



   .. py:method:: describe_agent(agent_id)

      Retrieves a full description of the specified agent.

      :param agent_id: Unique string identifier associated with the agent.
      :type agent_id: str

      :returns: Description of the agent.
      :rtype: Agent



   .. py:method:: describe_agent_version(agent_version)

      Retrieves a full description of the specified agent version.

      :param agent_version: Unique string identifier of the agent version.
      :type agent_version: str

      :returns: An agent version.
      :rtype: AgentVersion



   .. py:method:: search_feature_groups(text, num_results = 10, project_id = None, feature_group_ids = None)

      Search feature groups based on text and filters.

      :param text: Text to use for approximately matching feature groups.
      :type text: str
      :param num_results: The maximum number of search results to retrieve. The length of the returned list is less than or equal to num_results.
      :type num_results: int
      :param project_id: The ID of the project in which to restrict the search, if specified.
      :type project_id: str
      :param feature_group_ids: A list of feature group IDs to restrict the search to.
      :type feature_group_ids: List

      :returns: A list of search results, each containing the retrieved object and its relevance score
      :rtype: list[OrganizationSearchResult]
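
      For example, a sketch of a text search restricted to a single project (the project ID is a placeholder):

      .. code-block:: python

         results = client.search_feature_groups(
             'customer churn features', num_results=5, project_id='project_id')
         for result in results:
             print(result)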



   .. py:method:: list_agents(project_id)

      Retrieves the list of agents in the specified project.

      :param project_id: The unique identifier associated with the project.
      :type project_id: str

      :returns: A list of agents in the project.
      :rtype: list[Agent]



   .. py:method:: list_agent_versions(agent_id, limit = 100, start_after_version = None)

      List all versions of an agent.

      :param agent_id: The unique identifier associated with the agent.
      :type agent_id: str
      :param limit: If provided, limits the number of agent versions returned.
      :type limit: int
      :param start_after_version: Unique string identifier of the version after which the list starts.
      :type start_after_version: str

      :returns: An array of Agent versions.
      :rtype: list[AgentVersion]



   .. py:method:: copy_agent(agent_id, project_id = None)

      Creates a copy of the input agent

      :param agent_id: The unique id of the agent whose copy is to be generated.
      :type agent_id: str
      :param project_id: Project ID in which to create the new agent. Defaults to the source agent's project ID.
      :type project_id: str

      :returns: The newly generated agent.
      :rtype: Agent



   .. py:method:: list_llm_apps()

      Lists all available LLM Apps, which are LLMs tailored to a specific task, such as code generation for a particular service's API.

      :returns: A list of LLM Apps.
      :rtype: list[LlmApp]



   .. py:method:: list_document_retrievers(project_id, limit = 100, start_after_id = None)

      List all the document retrievers.

      :param project_id: The ID of the project that the document retriever is created in.
      :type project_id: str
      :param limit: The number of document retrievers to return.
      :type limit: int
      :param start_after_id: An offset parameter to exclude all document retrievers up to this specified ID.
      :type start_after_id: str

      :returns: All the document retrievers in the organization associated with the specified project.
      :rtype: list[DocumentRetriever]



   .. py:method:: describe_document_retriever(document_retriever_id)

      Describe a Document Retriever.

      :param document_retriever_id: A unique string identifier associated with the document retriever.
      :type document_retriever_id: str

      :returns: The document retriever object.
      :rtype: DocumentRetriever



   .. py:method:: describe_document_retriever_by_name(name)

      Describe a document retriever by its name.

      :param name: The unique name of the document retriever to look up.
      :type name: str

      :returns: The Document Retriever.
      :rtype: DocumentRetriever



   .. py:method:: list_document_retriever_versions(document_retriever_id, limit = 100, start_after_version = None)

      List all the document retriever versions with a given ID.

      :param document_retriever_id: A unique string identifier associated with the document retriever.
      :type document_retriever_id: str
      :param limit: The number of vector store versions to retrieve. The maximum value is 100.
      :type limit: int
      :param start_after_version: An offset parameter to exclude all document retriever versions up to this specified one.
      :type start_after_version: str

      :returns: All the document retriever versions associated with the document retriever.
      :rtype: list[DocumentRetrieverVersion]
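
      For example, to list the versions of a document retriever (a sketch; the retriever ID is a placeholder):

      .. code-block:: python

         versions = client.list_document_retriever_versions(
             'document_retriever_id', limit=100)
         for version in versions:
             print(version)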



   .. py:method:: describe_document_retriever_version(document_retriever_version)

      Describe a document retriever version.

      :param document_retriever_version: A unique string identifier associated with the document retriever version.
      :type document_retriever_version: str

      :returns: The document retriever version object.
      :rtype: DocumentRetrieverVersion



.. py:data:: _request_context

.. py:class:: CodeAgentResponse(client, deploymentConversationId=None, messages=None, toolUseRequest=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A response from a Code Agent

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param deploymentConversationId: The unique identifier of the deployment conversation.
   :type deploymentConversationId: str
   :param messages: The conversation messages in the chat.
   :type messages: list
   :param toolUseRequest: A request to use an external tool. Contains ``id`` (str), a unique identifier for the tool use request; ``input`` (dict), the input parameters for the tool, e.g. {'command': 'ls'}; ``name`` (str), the name of the tool being used, e.g. 'bash'; and ``type`` (str), always 'tool_use' to identify this as a tool request.
   :type toolUseRequest: dict


   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: messages
      :value: None



   .. py:attribute:: tool_use_request
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CodeAutocompleteResponse(client, autocompleteResponse=None, showAutocomplete=None, lineNumber=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An autocomplete response from an LLM

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param autocompleteResponse: autocomplete code
   :type autocompleteResponse: str
   :param showAutocomplete: Whether to show autocomplete in the client
   :type showAutocomplete: bool
   :param lineNumber: The line number where autocomplete should be shown
   :type lineNumber: int


   .. py:attribute:: autocomplete_response
      :value: None



   .. py:attribute:: show_autocomplete
      :value: None



   .. py:attribute:: line_number
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CodeBot(client, llmName=None, name=None, imageUploadSupported=None, codeAgentSupported=None, codeEditSupported=None, isPremium=None, llmBotIcon=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A bot option for CodeLLM

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param llmName: The name of the LLM.
   :type llmName: str
   :param name: The name of the bot.
   :type name: str
   :param imageUploadSupported: Whether the LLM supports image upload.
   :type imageUploadSupported: bool
   :param codeAgentSupported: Whether the LLM supports code agent.
   :type codeAgentSupported: bool
   :param codeEditSupported: Whether the LLM supports code edit.
   :type codeEditSupported: bool
   :param isPremium: Whether the LLM is a premium LLM.
   :type isPremium: bool
   :param llmBotIcon: The icon of the LLM bot.
   :type llmBotIcon: str


   .. py:attribute:: llm_name
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: image_upload_supported
      :value: None



   .. py:attribute:: code_agent_supported
      :value: None



   .. py:attribute:: code_edit_supported
      :value: None



   .. py:attribute:: is_premium
      :value: None



   .. py:attribute:: llm_bot_icon
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CodeEdit(client, filePath=None, startLine=None, endLine=None, text=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A code edit response from an LLM

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param filePath: The path of the file to be edited.
   :type filePath: str
   :param startLine: The start line of the code to be replaced.
   :type startLine: int
   :param endLine: The end line of the code to be replaced.
   :type endLine: int
   :param text: The new text.
   :type text: str


   .. py:attribute:: file_path
      :value: None



   .. py:attribute:: start_line
      :value: None



   .. py:attribute:: end_line
      :value: None



   .. py:attribute:: text
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CodeEditResponse(client, codeChanges=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A code edit response from an LLM

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param codeChanges: The code changes to be applied.
   :type codeChanges: list


   .. py:attribute:: code_changes
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CodeEdits(client, codeEdits=None, codeChanges=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A code edit response from an LLM

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param codeEdits: The code edits to be applied.
   :type codeEdits: list[CodeEdit]
   :param codeChanges: The code changes to be applied.
   :type codeChanges: list


   .. py:attribute:: code_edits
      :value: None



   .. py:attribute:: code_changes
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CodeSource(client, sourceType=None, sourceCode=None, applicationConnectorId=None, applicationConnectorInfo=None, packageRequirements=None, status=None, error=None, publishingMsg=None, moduleDependencies=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Code source for python-based custom feature groups and models

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param sourceType: The type of the source, one of TEXT, PYTHON, FILE_UPLOAD, or APPLICATION_CONNECTOR
   :type sourceType: str
   :param sourceCode: If the type of the source is TEXT, the raw text of the function
   :type sourceCode: str
   :param applicationConnectorId: The Application Connector to fetch the code from
   :type applicationConnectorId: str
   :param applicationConnectorInfo: Args passed to the application connector to fetch the code
   :type applicationConnectorInfo: str
   :param packageRequirements: The pip package dependencies required to run the code
   :type packageRequirements: list
   :param status: The status of the code and validations
   :type status: str
   :param error: If the status is failed, an error message describing what went wrong
   :type error: str
   :param publishingMsg: Warnings in the source code
   :type publishingMsg: dict
   :param moduleDependencies: The list of internal modules dependencies required to run the code
   :type moduleDependencies: list


   .. py:attribute:: source_type
      :value: None



   .. py:attribute:: source_code
      :value: None



   .. py:attribute:: application_connector_id
      :value: None



   .. py:attribute:: application_connector_info
      :value: None



   .. py:attribute:: package_requirements
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: publishing_msg
      :value: None



   .. py:attribute:: module_dependencies
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: import_as_cell()

      Adds the source code as an unexecuted cell in the notebook.



.. py:class:: CodeSuggestionValidationResponse(client, isValid=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A response from an LLM to validate a code suggestion.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param isValid: Whether the code suggestion is valid.
   :type isValid: bool


   .. py:attribute:: is_valid
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ComputePointInfo(client, updatedAt=None, last24HoursUsage=None, last7DaysUsage=None, currMonthAvailPoints=None, currMonthUsage=None, lastThrottlePopUp=None, alwaysDisplay=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The compute point info of the organization

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param updatedAt: The last time the compute point info was updated
   :type updatedAt: str
   :param last24HoursUsage: The organization's compute point usage over the last 24 hours
   :type last24HoursUsage: int
   :param last7DaysUsage: The organization's compute point usage over the last 7 days
   :type last7DaysUsage: int
   :param currMonthAvailPoints: The current month's available compute points
   :type currMonthAvailPoints: int
   :param currMonthUsage: The current month's compute point usage
   :type currMonthUsage: int
   :param lastThrottlePopUp: The last time the organization was throttled
   :type lastThrottlePopUp: str
   :param alwaysDisplay: Whether to always display the compute point toggle
   :type alwaysDisplay: bool


   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: last_24_hours_usage
      :value: None



   .. py:attribute:: last_7_days_usage
      :value: None



   .. py:attribute:: curr_month_avail_points
      :value: None



   .. py:attribute:: curr_month_usage
      :value: None



   .. py:attribute:: last_throttle_pop_up
      :value: None



   .. py:attribute:: always_display
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ConcatenationConfig(client, concatenatedTable=None, mergeType=None, replaceUntilTimestamp=None, skipMaterialize=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Feature Group Concatenation Config

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param concatenatedTable: The feature group to concatenate with the destination feature group.
   :type concatenatedTable: str
   :param mergeType: The type of merge to perform, either `UNION` or `INTERSECTION`.
   :type mergeType: str
   :param replaceUntilTimestamp: The Unix timestamp to specify the point up to which data from the source feature group will be replaced.
   :type replaceUntilTimestamp: int
   :param skipMaterialize: If `True`, the concatenated feature group will not be materialized.
   :type skipMaterialize: bool


   .. py:attribute:: concatenated_table
      :value: None



   .. py:attribute:: merge_type
      :value: None



   .. py:attribute:: replace_until_timestamp
      :value: None



   .. py:attribute:: skip_materialize
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ConstantsAutocompleteResponse(client, maxPendingRequests=None, acceptanceDelay=None, debounceDelay=None, recordUserAction=None, validateSuggestion=None, validationLinesThreshold=None, maxTrackedRecentChanges=None, diffThreshold=None, derivativeThreshold=None, defaultSurroundingLines=None, maxTrackedVisitChanges=None, selectionCooldownMs=None, viewingCooldownMs=None, maxLines=None, editCooldownMs=None, scrollDebounceMs=None, lspDeadline=None, diagnosticsThreshold=None, diagnosticEachThreshold=None, numVsCodeSuggestions=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A dictionary of constants to be used in the autocomplete.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param maxPendingRequests: The maximum number of pending requests.
   :type maxPendingRequests: int
   :param acceptanceDelay: The acceptance delay.
   :type acceptanceDelay: int
   :param debounceDelay: The debounce delay.
   :type debounceDelay: int
   :param recordUserAction: Whether to record user action.
   :type recordUserAction: bool
   :param validateSuggestion: Whether to validate the suggestion.
   :type validateSuggestion: bool
   :param validationLinesThreshold: The number of lines to validate the suggestion.
   :type validationLinesThreshold: int
   :param maxTrackedRecentChanges: The maximum number of recent file changes to track.
   :type maxTrackedRecentChanges: int
   :param diffThreshold: The diff operations threshold.
   :type diffThreshold: int
   :param derivativeThreshold: The derivative threshold for deletions
   :type derivativeThreshold: int
   :param defaultSurroundingLines: The default number of surrounding lines to include in the recently visited context.
   :type defaultSurroundingLines: int
   :param maxTrackedVisitChanges: The maximum number of recently visited ranges to track.
   :type maxTrackedVisitChanges: int
   :param selectionCooldownMs: The cooldown time in milliseconds for selection changes.
   :type selectionCooldownMs: int
   :param viewingCooldownMs: The cooldown time in milliseconds for viewing changes.
   :type viewingCooldownMs: int
   :param maxLines: The maximum number of lines to include in recently visited context.
   :type maxLines: int
   :param editCooldownMs: The cooldown time in milliseconds after last edit.
   :type editCooldownMs: int
   :param scrollDebounceMs: The debounce time in milliseconds for scroll events.
   :type scrollDebounceMs: int
   :param lspDeadline: The deadline in milliseconds for LSP context.
   :type lspDeadline: int
   :param diagnosticsThreshold: The max number of diagnostics to show.
   :type diagnosticsThreshold: int
   :param diagnosticEachThreshold: The max number of characters to show for each diagnostic type.
   :type diagnosticEachThreshold: int
   :param numVsCodeSuggestions: The number of VS Code suggestions to show.
   :type numVsCodeSuggestions: int


   .. py:attribute:: max_pending_requests
      :value: None



   .. py:attribute:: acceptance_delay
      :value: None



   .. py:attribute:: debounce_delay
      :value: None



   .. py:attribute:: record_user_action
      :value: None



   .. py:attribute:: validate_suggestion
      :value: None



   .. py:attribute:: validation_lines_threshold
      :value: None



   .. py:attribute:: max_tracked_recent_changes
      :value: None



   .. py:attribute:: diff_threshold
      :value: None



   .. py:attribute:: derivative_threshold
      :value: None



   .. py:attribute:: default_surrounding_lines
      :value: None



   .. py:attribute:: max_tracked_visit_changes
      :value: None



   .. py:attribute:: selection_cooldown_ms
      :value: None



   .. py:attribute:: viewing_cooldown_ms
      :value: None



   .. py:attribute:: max_lines
      :value: None



   .. py:attribute:: edit_cooldown_ms
      :value: None



   .. py:attribute:: scroll_debounce_ms
      :value: None



   .. py:attribute:: lsp_deadline
      :value: None



   .. py:attribute:: diagnostics_threshold
      :value: None



   .. py:attribute:: diagnostic_each_threshold
      :value: None



   .. py:attribute:: num_vs_code_suggestions
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CpuGpuMemorySpecs(client, default=None, data=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Includes the memory specs of the CPU/GPU

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param default: The default memory size for the processing unit.
   :type default: int
   :param data: The list of memory sizes for the processing unit.
   :type data: list


   .. py:attribute:: default
      :value: None



   .. py:attribute:: data
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
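
A small, hedged example of using the ``default`` and ``data`` fields to pick a memory size, assuming ``specs`` is a ``CpuGpuMemorySpecs`` instance returned by the API:

.. code-block:: python

   # `specs` is assumed to be a CpuGpuMemorySpecs instance returned by the API.
   available = specs.data or []                      # list of memory sizes, possibly empty
   chosen = max(available) if available else specs.default
   print("Using memory size:", chosen)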



.. py:class:: CustomChatInstructions(client, userInformationInstructions=None, responseInstructions=None, enableCodeExecution=None, enableImageGeneration=None, enableWebSearch=None, enablePlayground=None, experimentalFeatures=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Custom Chat Instructions

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param userInformationInstructions: The behavior instructions for the chat.
   :type userInformationInstructions: str
   :param responseInstructions: The response instructions for the chat.
   :type responseInstructions: str
   :param enableCodeExecution: Whether or not code execution is enabled.
   :type enableCodeExecution: bool
   :param enableImageGeneration: Whether or not image generation is enabled.
   :type enableImageGeneration: bool
   :param enableWebSearch: Whether or not web search is enabled.
   :type enableWebSearch: bool
   :param enablePlayground: Whether or not playground is enabled.
   :type enablePlayground: bool
   :param experimentalFeatures: Experimental features.
   :type experimentalFeatures: dict


   .. py:attribute:: user_information_instructions
      :value: None



   .. py:attribute:: response_instructions
      :value: None



   .. py:attribute:: enable_code_execution
      :value: None



   .. py:attribute:: enable_image_generation
      :value: None



   .. py:attribute:: enable_web_search
      :value: None



   .. py:attribute:: enable_playground
      :value: None



   .. py:attribute:: experimental_features
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CustomLossFunction(client, notebookId=None, name=None, createdAt=None, lossFunctionName=None, lossFunctionType=None, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Custom Loss Function

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param notebookId: The unique identifier of the notebook used to create/edit the loss function.
   :type notebookId: str
   :param name: Name assigned to the custom loss function.
   :type name: str
   :param createdAt: When the loss function was created.
   :type createdAt: str
   :param lossFunctionName: The name of the function defined in the source code.
   :type lossFunctionName: str
   :param lossFunctionType: The category of problems that this loss would be applicable to, e.g. regression, multi-label classification, etc.
   :type lossFunctionType: str
   :param codeSource: Information about the source code of the loss function.
   :type codeSource: CodeSource


   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: loss_function_name
      :value: None



   .. py:attribute:: loss_function_type
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CustomMetric(client, customMetricId=None, name=None, createdAt=None, problemType=None, notebookId=None, latestCustomMetricVersion={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Custom metric.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param customMetricId: Unique string identifier of the custom metric.
   :type customMetricId: str
   :param name: Name assigned to the custom metric.
   :type name: str
   :param createdAt: Date and time when the custom metric was created (ISO 8601 format).
   :type createdAt: str
   :param problemType: Problem type that this custom metric is applicable to (e.g. regression).
   :type problemType: str
   :param notebookId: Unique string identifier of the notebook used to create/edit the custom metric.
   :type notebookId: str
   :param latestCustomMetricVersion: Latest version of the custom metric.
   :type latestCustomMetricVersion: CustomMetricVersion


   .. py:attribute:: custom_metric_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: problem_type
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: latest_custom_metric_version


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: CustomMetricVersion(client, customMetricVersion=None, name=None, createdAt=None, customMetricFunctionName=None, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Custom metric version

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param customMetricVersion: Unique string identifier for the custom metric version.
   :type customMetricVersion: str
   :param name: Name assigned to the custom metric.
   :type name: str
   :param createdAt: ISO-8601 string indicating when the custom metric was created.
   :type createdAt: str
   :param customMetricFunctionName: The name of the function defined in the source code.
   :type customMetricFunctionName: str
   :param codeSource: Information about the source code of the custom metric.
   :type codeSource: CodeSource


   .. py:attribute:: custom_metric_version
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: custom_metric_function_name
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: CustomMetricVersion



   .. py:method:: describe()

      Describes a given custom metric version

      :param custom_metric_version: A unique string identifier for the custom metric version.
      :type custom_metric_version: str

      :returns: An object describing the custom metric version.
      :rtype: CustomMetricVersion
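
A short, hedged sketch of the ``refresh``/``describe`` pattern on this class, assuming ``metric_version`` is a ``CustomMetricVersion`` instance obtained from the API:

.. code-block:: python

   # `metric_version` is assumed to be a CustomMetricVersion instance.
   metric_version.refresh()                          # re-fetches the fields in place
   print(metric_version.custom_metric_function_name)

   described = metric_version.describe()             # returns the described version object
   print(described.to_dict())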



.. py:class:: CustomTrainFunctionInfo(client, trainingDataParameterNameMapping=None, schemaMappings=None, trainDataParameterToFeatureGroupIds=None, trainingConfig=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Information about how to call the customer provided train function.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param trainingDataParameterNameMapping: The mapping from feature group type to the dataframe parameter name
   :type trainingDataParameterNameMapping: dict
   :param schemaMappings: The feature type to feature name mapping for each dataframe
   :type schemaMappings: dict
   :param trainDataParameterToFeatureGroupIds: The mapping from the dataframe parameter name to the feature group id backing the data
   :type trainDataParameterToFeatureGroupIds: dict
   :param trainingConfig: The configs for training
   :type trainingConfig: dict


   .. py:attribute:: training_data_parameter_name_mapping
      :value: None



   .. py:attribute:: schema_mappings
      :value: None



   .. py:attribute:: train_data_parameter_to_feature_group_ids
      :value: None



   .. py:attribute:: training_config
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
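
As a rough, hedged sketch of how these mappings might be consumed, assume ``info`` is a ``CustomTrainFunctionInfo`` instance, ``load_feature_group_as_df`` is a hypothetical helper that loads a feature group ID into a dataframe, and ``my_train_function`` is the customer-provided train function:

.. code-block:: python

   # `info` is assumed to be a CustomTrainFunctionInfo instance;
   # `load_feature_group_as_df` and `my_train_function` are hypothetical names.
   kwargs = {
       param_name: load_feature_group_as_df(feature_group_id)
       for param_name, feature_group_id in info.train_data_parameter_to_feature_group_ids.items()
   }
   # How the training config is passed depends on the train function's signature;
   # here it is forwarded as a keyword argument purely for illustration.
   model = my_train_function(**kwargs, training_config=info.training_config)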



.. py:class:: DataConsistencyDuplication(client, totalCount=None, numDuplicates=None, sample={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Data Consistency for duplication within data

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param totalCount: Total count of rows in data.
   :type totalCount: int
   :param numDuplicates: Number of Duplicates based on primary keys in data.
   :type numDuplicates: int
   :param sample: A list of dicts enumerating the rows that contained duplicate primary keys.
   :type sample: FeatureRecord


   .. py:attribute:: total_count
      :value: None



   .. py:attribute:: num_duplicates
      :value: None



   .. py:attribute:: sample


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
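
For example, the duplicate fraction can be derived directly from the counts above (``duplication`` is assumed to be a ``DataConsistencyDuplication`` instance):

.. code-block:: python

   # `duplication` is assumed to be a DataConsistencyDuplication instance.
   if duplication.total_count:
       ratio = duplication.num_duplicates / duplication.total_count
       print(f"{ratio:.2%} of rows have duplicated primary keys")
   # `duplication.sample` holds example rows that contained the duplicate keys.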



.. py:class:: DataMetrics(client, metrics=None, schema=None, numRows=None, numCols=None, numDuplicateRows=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Processed Metrics and Schema for a dataset version or feature group version

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param metrics: A list of dicts with metrics for each column
   :type metrics: list[dict]
   :param schema: A list of dicts with the schema for each metric
   :type schema: list[dict]
   :param numRows: The number of rows
   :type numRows: int
   :param numCols: The number of columns
   :type numCols: int
   :param numDuplicateRows: The number of duplicate rows
   :type numDuplicateRows: int


   .. py:attribute:: metrics
      :value: None



   .. py:attribute:: schema
      :value: None



   .. py:attribute:: num_rows
      :value: None



   .. py:attribute:: num_cols
      :value: None



   .. py:attribute:: num_duplicate_rows
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
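
Since ``DatasetVersion.get_metrics()`` (documented further below) returns a ``DataMetrics`` object, a quick inspection looks roughly like this:

.. code-block:: python

   # `metrics` is assumed to be a DataMetrics instance, e.g. the result of
   # DatasetVersion.get_metrics().
   print(metrics.num_rows, "rows x", metrics.num_cols, "columns,",
         metrics.num_duplicate_rows, "duplicate rows")

   # `metrics.metrics` is a list of per-column metric dicts.
   for column_metrics in (metrics.metrics or []):
       print(column_metrics)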



.. py:class:: DataPrepLogs(client, logs=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs from data preparation.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param logs: List of logs from data preparation during model training.
   :type logs: list[str]


   .. py:attribute:: logs
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DataQualityResults(client, results=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Data Quality results from normalization stage

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param results: A list with different pairs of quality parameters and their values
   :type results: dict


   .. py:attribute:: results
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DataUploadResult(client, docInfos=None, maxCount=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Results of uploading data to agent.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param docInfos: A list of dicts with information on the documents uploaded to the agent.
   :type docInfos: list[AgentDataDocumentInfo]
   :param maxCount: The maximum number of documents
   :type maxCount: int


   .. py:attribute:: doc_infos
      :value: None



   .. py:attribute:: max_count
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DatabaseColumnFeatureMapping(client, databaseColumn=None, feature=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Mapping for export of feature group version to database column

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param databaseColumn: The database column name.
   :type databaseColumn: str
   :param feature: The feature group column it has been matched to.
   :type feature: str


   .. py:attribute:: database_column
      :value: None



   .. py:attribute:: feature
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DatabaseConnector(client, databaseConnectorId=None, service=None, name=None, status=None, auth=None, createdAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A connector to an external service

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param databaseConnectorId: A unique string identifier for the connection.
   :type databaseConnectorId: str
   :param service: An enum string indicating the service this connection connects to.
   :type service: str
   :param name: A user-friendly name for the service.
   :type name: str
   :param status: The status of the database connector.
   :type status: str
   :param auth: Non-secret connection information for this connector.
   :type auth: dict
   :param createdAt: The ISO-8601 string indicating when the database connector was created.
   :type createdAt: str


   .. py:attribute:: database_connector_id
      :value: None



   .. py:attribute:: service
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: auth
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: list_objects(fetch_raw_data = False)

      Lists queryable objects in the database connector.

      :param fetch_raw_data: If true, return unfiltered objects.
      :type fetch_raw_data: bool



   .. py:method:: get_object_schema(object_name = None, fetch_raw_data = False)

      Get the schema of an object in a database connector.

      :param object_name: Unique identifier for the object in the external system.
      :type object_name: str
      :param fetch_raw_data: If true, return unfiltered list of columns.
      :type fetch_raw_data: bool

      :returns: The schema of the object.
      :rtype: DatabaseConnectorSchema



   .. py:method:: rename(name)

      Renames a Database Connector

      :param name: The new name for the Database Connector.
      :type name: str



   .. py:method:: verify()

      Checks if Abacus.AI can access the specified database.

      :param database_connector_id: Unique string identifier for the database connector.
      :type database_connector_id: str



   .. py:method:: delete()

      Delete a database connector.

      :param database_connector_id: The unique identifier for the database connector.
      :type database_connector_id: str



   .. py:method:: query(query)

      Runs a query in the specified database connector.

      :param query: The query to be run in the database connector.
      :type query: str
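
Putting the methods above together, a hedged sketch of exploring and querying a connector (how the ``DatabaseConnector`` instance is obtained from the client is not shown, and the object name is a placeholder):

.. code-block:: python

   # `connector` is assumed to be a DatabaseConnector instance returned by
   # an authenticated client.
   connector.list_objects()                               # lists queryable objects

   schema = connector.get_object_schema(object_name="my_table")  # placeholder name
   print(schema.table_name, [column.name for column in schema.columns])

   # Runs an ad-hoc query through the connector.
   connector.query("SELECT 1")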



.. py:class:: DatabaseConnectorColumn(client, name=None, externalDataType=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A schema description for a column from a database connector

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The unique name of the column.
   :type name: str
   :param externalDataType: The data type of column in the external database system.
   :type externalDataType: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: external_data_type
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DatabaseConnectorSchema(client, tableName=None, columns={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A schema description for a table from a database connector

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param tableName: The unique name of the table.
   :type tableName: str
   :param columns: List of columns in the table.
   :type columns: DatabaseConnectorColumn


   .. py:attribute:: table_name
      :value: None



   .. py:attribute:: columns


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Dataset(client, datasetId=None, sourceType=None, dataSource=None, createdAt=None, ignoreBefore=None, ephemeral=None, lookbackDays=None, databaseConnectorId=None, databaseConnectorConfig=None, connectorType=None, featureGroupTableName=None, applicationConnectorId=None, applicationConnectorConfig=None, incremental=None, isDocumentset=None, extractBoundingBoxes=None, mergeFileSchemas=None, referenceOnlyDocumentset=None, versionLimit=None, schema={}, refreshSchedules={}, latestDatasetVersion={}, parsingConfig={}, documentProcessingConfig={}, attachmentParsingConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A dataset reference

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param datasetId: The unique identifier of the dataset.
   :type datasetId: str
   :param sourceType: The source of the Dataset. EXTERNAL_SERVICE, UPLOAD, or STREAMING.
   :type sourceType: str
   :param dataSource: Location of the data. It may be a URI such as an S3 bucket or a database table.
   :type dataSource: str
   :param createdAt: The timestamp at which this dataset was created.
   :type createdAt: str
   :param ignoreBefore: The timestamp at which all previous events are ignored when training.
   :type ignoreBefore: str
   :param ephemeral: The dataset is ephemeral and not used for training.
   :type ephemeral: bool
   :param lookbackDays: Specific to streaming datasets, this specifies how many days' worth of data to include when generating a snapshot. A value of 0 leaves this selection to the system.
   :type lookbackDays: int
   :param databaseConnectorId: The Database Connector used.
   :type databaseConnectorId: str
   :param databaseConnectorConfig: The database connector query used to retrieve data.
   :type databaseConnectorConfig: dict
   :param connectorType: The type of connector used to get this dataset: FILE or DATABASE.
   :type connectorType: str
   :param featureGroupTableName: The table name of the dataset's feature group.
   :type featureGroupTableName: str
   :param applicationConnectorId: The Application Connector used.
   :type applicationConnectorId: str
   :param applicationConnectorConfig: The application connector query used to retrieve data.
   :type applicationConnectorConfig: dict
   :param incremental: If dataset is an incremental dataset.
   :type incremental: bool
   :param isDocumentset: If dataset is a documentset.
   :type isDocumentset: bool
   :param extractBoundingBoxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True.
   :type extractBoundingBoxes: bool
   :param mergeFileSchemas: If the merge file schemas policy is enabled.
   :type mergeFileSchemas: bool
   :param referenceOnlyDocumentset: Signifies whether to save the data reference only. Only valid if is_documentset is True.
   :type referenceOnlyDocumentset: bool
   :param versionLimit: Version limit for the dataset.
   :type versionLimit: int
   :param latestDatasetVersion: The latest version of this dataset.
   :type latestDatasetVersion: DatasetVersion
   :param schema: List of resolved columns.
   :type schema: DatasetColumn
   :param refreshSchedules: List of schedules that determines when the next version of the dataset will be created.
   :type refreshSchedules: RefreshSchedule
   :param parsingConfig: The parsing config used for dataset.
   :type parsingConfig: ParsingConfig
   :param documentProcessingConfig: The document processing config used for dataset (when is_documentset is True).
   :type documentProcessingConfig: DocumentProcessingConfig
   :param attachmentParsingConfig: The attachment parsing config used for dataset (eg. for salesforce attachment parsing)
   :type attachmentParsingConfig: AttachmentParsingConfig


   .. py:attribute:: dataset_id
      :value: None



   .. py:attribute:: source_type
      :value: None



   .. py:attribute:: data_source
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: ignore_before
      :value: None



   .. py:attribute:: ephemeral
      :value: None



   .. py:attribute:: lookback_days
      :value: None



   .. py:attribute:: database_connector_id
      :value: None



   .. py:attribute:: database_connector_config
      :value: None



   .. py:attribute:: connector_type
      :value: None



   .. py:attribute:: feature_group_table_name
      :value: None



   .. py:attribute:: application_connector_id
      :value: None



   .. py:attribute:: application_connector_config
      :value: None



   .. py:attribute:: incremental
      :value: None



   .. py:attribute:: is_documentset
      :value: None



   .. py:attribute:: extract_bounding_boxes
      :value: None



   .. py:attribute:: merge_file_schemas
      :value: None



   .. py:attribute:: reference_only_documentset
      :value: None



   .. py:attribute:: version_limit
      :value: None



   .. py:attribute:: schema


   .. py:attribute:: refresh_schedules


   .. py:attribute:: latest_dataset_version


   .. py:attribute:: parsing_config


   .. py:attribute:: document_processing_config


   .. py:attribute:: attachment_parsing_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: create_version_from_file_connector(location = None, file_format = None, csv_delimiter = None, merge_file_schemas = None, parsing_config = None, sql_query = None)

      Creates a new version of the specified dataset.

      :param location: External URI to import the dataset from. If not specified, the last location will be used.
      :type location: str
      :param file_format: File format to be used. If not specified, the service will try to detect the file format.
      :type file_format: str
      :param csv_delimiter: If the file format is CSV, use a specific CSV delimiter.
      :type csv_delimiter: str
      :param merge_file_schemas: Signifies if the merge file schema policy is enabled.
      :type merge_file_schemas: bool
      :param parsing_config: Custom config for dataset parsing.
      :type parsing_config: ParsingConfig
      :param sql_query: The SQL query to use when fetching data from the specified location. Use `__TABLE__` as a placeholder for the table name. For example: "SELECT * FROM __TABLE__ WHERE event_date > '2021-01-01'". If not provided, the entire dataset from the specified location will be imported.
      :type sql_query: str

      :returns: The new Dataset Version created.
      :rtype: DatasetVersion
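
   A hedged usage sketch; the location, format and delimiter below are placeholders rather than real values:

   .. code-block:: python

      # `dataset` is assumed to be a Dataset instance returned by the client.
      version = dataset.create_version_from_file_connector(
          location="s3://my-bucket/path/",   # placeholder URI
          file_format="CSV",
          csv_delimiter=",",
      )
      version.wait_for_import()              # blocks until the import finishes
      print(version.get_status())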



   .. py:method:: create_version_from_database_connector(object_name = None, columns = None, query_arguments = None, sql_query = None)

      Creates a new version of the specified dataset.

      :param object_name: The name/ID of the object in the service to query. If not specified, the last name will be used.
      :type object_name: str
      :param columns: The columns to query from the external service object. If not specified, the last columns will be used.
      :type columns: str
      :param query_arguments: Additional query arguments to filter the data. If not specified, the last arguments will be used.
      :type query_arguments: str
      :param sql_query: The full SQL query to use when fetching data. If present, this parameter will override object_name, columns, and query_arguments.
      :type sql_query: str

      :returns: The new Dataset Version created.
      :rtype: DatasetVersion



   .. py:method:: create_version_from_application_connector(dataset_config = None)

      Creates a new version of the specified dataset.

      :param dataset_config: Dataset config for the application connector. If any of the fields are not specified, the last values will be used.
      :type dataset_config: ApplicationConnectorDatasetConfig

      :returns: The new Dataset Version created.
      :rtype: DatasetVersion



   .. py:method:: create_version_from_upload(file_format = None)

      Creates a new version of the specified dataset using a local file upload.

      :param file_format: File format to be used. If not specified, the service will attempt to detect the file format.
      :type file_format: str

      :returns: Token to be used when uploading file parts.
      :rtype: Upload



   .. py:method:: create_version_from_document_reprocessing(document_processing_config = None)

      Creates a new dataset version for a source docstore dataset with the provided document processing configuration. This does not re-import the data; it reuses the data already imported in the latest dataset version and only performs document processing on it.

      :param document_processing_config: The document processing configuration to use for the new dataset version. If not specified, the document processing configuration from the source dataset will be used.
      :type document_processing_config: DatasetDocumentProcessingConfig

      :returns: The new dataset version created.
      :rtype: DatasetVersion



   .. py:method:: snapshot_streaming_data()

      Snapshots the current data in the streaming dataset.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str

      :returns: The new Dataset Version created by taking a snapshot of the current data in the streaming dataset.
      :rtype: DatasetVersion



   .. py:method:: set_column_data_type(column, data_type)

      Set a Dataset's column type.

      :param column: The name of the column.
      :type column: str
      :param data_type: The type of the data in the column. Note: Some ColumnMappings may restrict the options or explicitly set the DataType.
      :type data_type: DataType

      :returns: The dataset and schema after the data type has been set.
      :rtype: Dataset



   .. py:method:: set_streaming_retention_policy(retention_hours = None, retention_row_count = None, ignore_records_before_timestamp = None)

      Sets the streaming retention policy.

      :param retention_hours: Number of hours to retain streamed data in memory.
      :type retention_hours: int
      :param retention_row_count: Number of rows to retain streamed data in memory.
      :type retention_row_count: int
      :param ignore_records_before_timestamp: The Unix timestamp (in seconds) to use as a cutoff to ignore all entries sent before it
      :type ignore_records_before_timestamp: int



   .. py:method:: get_schema()

      Retrieves the column schema of a dataset.

      :param dataset_id: Unique string identifier of the dataset schema to look up.
      :type dataset_id: str

      :returns: List of column schema definitions.
      :rtype: list[DatasetColumn]



   .. py:method:: set_database_connector_config(database_connector_id, object_name = None, columns = None, query_arguments = None, sql_query = None)

      Sets database connector config for a dataset. This method is currently only supported for streaming datasets.

      :param database_connector_id: Unique String Identifier of the Database Connector to import the dataset from.
      :type database_connector_id: str
      :param object_name: If applicable, the name/ID of the object in the service to query.
      :type object_name: str
      :param columns: The columns to query from the external service object.
      :type columns: str
      :param query_arguments: Additional query arguments to filter the data.
      :type query_arguments: str
      :param sql_query: The full SQL query to use when fetching data. If present, this parameter will override `object_name`, `columns` and `query_arguments`.
      :type sql_query: str



   .. py:method:: update_version_limit(version_limit)

      Updates the version limit for the specified dataset.

      :param version_limit: The maximum number of versions permitted for the dataset. Once this limit is exceeded, the oldest versions will be purged in a First-In-First-Out (FIFO) order.
      :type version_limit: int

      :returns: The updated dataset.
      :rtype: Dataset



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Dataset



   .. py:method:: describe()

      Retrieves a full description of the specified dataset, with attributes such as its ID, name, source type, etc.

      :param dataset_id: The unique ID associated with the dataset.
      :type dataset_id: str

      :returns: The dataset.
      :rtype: Dataset



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      Retrieves a list of all dataset versions for the specified dataset.

      :param limit: The maximum length of the list of all dataset versions.
      :type limit: int
      :param start_after_version: The ID of the version after which the list starts.
      :type start_after_version: str

      :returns: A list of dataset versions.
      :rtype: list[DatasetVersion]



   .. py:method:: delete()

      Deletes the specified dataset from the organization.

      :param dataset_id: Unique string identifier of the dataset to delete.
      :type dataset_id: str



   .. py:method:: wait_for_import(timeout=900)

      A waiting call until dataset is imported.

      :param timeout: The time given to the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: wait_for_inspection(timeout=None)

      A waiting call until dataset is completely inspected.

      :param timeout: The time given to the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the latest dataset version.

      :returns: A string describing the status of a dataset (importing, inspecting, complete, etc.).
      :rtype: str



   .. py:method:: describe_feature_group()

      Gets the feature group attached to the dataset.

      :returns: A feature group object.
      :rtype: FeatureGroup



   .. py:method:: create_refresh_policy(cron)

      To create a refresh policy for a dataset.

      :param cron: A cron style string to set the refresh time.
      :type cron: str

      :returns: The refresh policy object.
      :rtype: RefreshPolicy



   .. py:method:: list_refresh_policies()

      Gets the refresh policies in a list.

      :returns: A list of refresh policy objects.
      :rtype: List[RefreshPolicy]
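
Tying several of the ``Dataset`` methods above together in a rough, hedged sketch (the cron string and version limit are placeholders):

.. code-block:: python

   # `dataset` is assumed to be a Dataset instance returned by the client.
   dataset.create_refresh_policy("0 6 * * *")   # placeholder cron: daily at 06:00
   dataset.update_version_limit(10)             # placeholder limit

   for version in dataset.list_versions(limit=5):
       print(version.dataset_version, version.status)

   feature_group = dataset.describe_feature_group()
   print(feature_group)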



.. py:class:: DatasetColumn(client, name=None, dataType=None, detectedDataType=None, featureType=None, detectedFeatureType=None, originalName=None, validDataTypes=None, timeFormat=None, timestampFrequency=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A schema description for a column

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The unique name of the column.
   :type name: str
   :param dataType: The underlying data type of each column.
   :type dataType: str
   :param detectedDataType: The detected data type of the column.
   :type detectedDataType: str
   :param featureType: Feature type of the column.
   :type featureType: str
   :param detectedFeatureType: The detected feature type of the column.
   :type detectedFeatureType: str
   :param originalName: The original name of the column.
   :type originalName: str
   :param validDataTypes: The valid data type options for this column.
   :type validDataTypes: list[str]
   :param timeFormat: The detected time format of the column.
   :type timeFormat: str
   :param timestampFrequency: The detected frequency of the timestamps in the dataset.
   :type timestampFrequency: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: data_type
      :value: None



   .. py:attribute:: detected_data_type
      :value: None



   .. py:attribute:: feature_type
      :value: None



   .. py:attribute:: detected_feature_type
      :value: None



   .. py:attribute:: original_name
      :value: None



   .. py:attribute:: valid_data_types
      :value: None



   .. py:attribute:: time_format
      :value: None



   .. py:attribute:: timestamp_frequency
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DatasetVersion(client, datasetVersion=None, status=None, datasetId=None, size=None, rowCount=None, fileInspectMetadata=None, createdAt=None, error=None, incrementalQueriedAt=None, uploadId=None, mergeFileSchemas=None, databaseConnectorConfig=None, applicationConnectorConfig=None, invalidRecords=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A specific version of a dataset

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param datasetVersion: The unique identifier of the dataset version.
   :type datasetVersion: str
   :param status: The current status of the dataset version
   :type status: str
   :param datasetId: A reference to the Dataset this dataset version belongs to.
   :type datasetId: str
   :param size: The size in bytes of the file.
   :type size: int
   :param rowCount: Number of rows in the dataset version.
   :type rowCount: int
   :param fileInspectMetadata: Metadata information about file's inspection. For example - the detected delimiter for CSV files.
   :type fileInspectMetadata: dict
   :param createdAt: The timestamp this dataset version was created.
   :type createdAt: str
   :param error: If status is FAILED, this field will be populated with an error.
   :type error: str
   :param incrementalQueriedAt: If the dataset version is from an incremental dataset, this is the last entry of timestamp column when the dataset version was created.
   :type incrementalQueriedAt: str
   :param uploadId: If the dataset version is being uploaded, this is the reference to the Upload.
   :type uploadId: str
   :param mergeFileSchemas: If the merge file schemas policy is enabled.
   :type mergeFileSchemas: bool
   :param databaseConnectorConfig: The database connector query used to retrieve data for this version.
   :type databaseConnectorConfig: dict
   :param applicationConnectorConfig: The application connector used to retrieve data for this version.
   :type applicationConnectorConfig: dict
   :param invalidRecords: Invalid records in the dataset version
   :type invalidRecords: str


   .. py:attribute:: dataset_version
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: dataset_id
      :value: None



   .. py:attribute:: size
      :value: None



   .. py:attribute:: row_count
      :value: None



   .. py:attribute:: file_inspect_metadata
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: incremental_queried_at
      :value: None



   .. py:attribute:: upload_id
      :value: None



   .. py:attribute:: merge_file_schemas
      :value: None



   .. py:attribute:: database_connector_config
      :value: None



   .. py:attribute:: application_connector_config
      :value: None



   .. py:attribute:: invalid_records
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: get_metrics(selected_columns = None, include_charts = False, include_statistics = True)

      Get metrics for a specific dataset version.

      :param selected_columns: A list of columns to order first.
      :type selected_columns: List
      :param include_charts: A flag indicating whether charts should be included in the response. Default is false.
      :type include_charts: bool
      :param include_statistics: A flag indicating whether statistics should be included in the response. Default is true.
      :type include_statistics: bool

      :returns: The metrics for the specified Dataset version.
      :rtype: DataMetrics



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: DatasetVersion



   .. py:method:: describe()

      Retrieves a full description of the specified dataset version, including its ID, name, source type, and other attributes.

      :param dataset_version: Unique string identifier associated with the dataset version.
      :type dataset_version: str

      :returns: The dataset version.
      :rtype: DatasetVersion



   .. py:method:: delete()

      Deletes the specified dataset version from the organization.

      :param dataset_version: String identifier of the dataset version to delete.
      :type dataset_version: str



   .. py:method:: get_logs()

      Retrieves the dataset import logs.

      :param dataset_version: The unique version ID of the dataset version.
      :type dataset_version: str

      :returns: The logs for the specified dataset version.
      :rtype: DatasetVersionLogs



   .. py:method:: wait_for_import(timeout=900)

      A waiting call until dataset version is imported.

      :param timeout: The time given to the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: wait_for_inspection(timeout=None)

      A waiting call until dataset version is completely inspected.

      :param timeout: The time given to the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the dataset version.

      :returns: A string describing the status of a dataset version (importing, inspecting, complete, etc.).
      :rtype: str
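
A hedged sketch combining the ``DatasetVersion`` calls above:

.. code-block:: python

   # `version` is assumed to be a DatasetVersion instance, e.g. from
   # Dataset.list_versions() or one of the create_version_* calls.
   version.wait_for_inspection()
   print(version.get_status())

   metrics = version.get_metrics(include_statistics=True)
   print(metrics.num_rows, metrics.num_cols)

   logs = version.get_logs()
   print(logs.logs)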



.. py:class:: DatasetVersionLogs(client, logs=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs from dataset version.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param logs: List of logs from dataset version.
   :type logs: list[str]


   .. py:attribute:: logs
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Deployment(client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, skipMetricsCheck=None, algoName=None, regions=None, error=None, batchStreamingUpdates=None, algorithm=None, pendingModelVersion=None, modelDeploymentConfig=None, predictionOperatorId=None, predictionOperatorVersion=None, pendingPredictionOperatorVersion=None, onlineFeatureGroupId=None, outputOnlineFeatureGroupId=None, realtimeMonitorId=None, runtimeConfigs=None, refreshSchedules={}, featureGroupExportConfig={}, defaultPredictionArguments={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A model deployment

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param deploymentId: A unique identifier for the deployment.
   :type deploymentId: str
   :param name: A user-friendly name for the deployment.
   :type name: str
   :param status: The status of the deployment.
   :type status: str
   :param description: A description of the deployment.
   :type description: str
   :param deployedAt: The date and time when the deployment became active, in ISO-8601 format.
   :type deployedAt: str
   :param createdAt: The date and time when the deployment was created, in ISO-8601 format.
   :type createdAt: str
   :param projectId: A unique identifier for the project this deployment belongs to.
   :type projectId: str
   :param modelId: The model that is currently deployed.
   :type modelId: str
   :param modelVersion: The model version ID that is currently deployed.
   :type modelVersion: str
   :param featureGroupId: The feature group that is currently deployed.
   :type featureGroupId: str
   :param featureGroupVersion: The feature group version ID that is currently deployed.
   :type featureGroupVersion: str
   :param callsPerSecond: The number of calls per second the deployment can handle.
   :type callsPerSecond: int
   :param autoDeploy: A flag marking the deployment as eligible for auto deployments whenever any model in the project finishes training.
   :type autoDeploy: bool
   :param skipMetricsCheck: A flag to skip the metric regression check for this deployment. This field is only relevant when auto_deploy is on.
   :type skipMetricsCheck: bool
   :param algoName: The name of the algorithm that is currently deployed.
   :type algoName: str
   :param regions: A list of regions that the deployment has been deployed to.
   :type regions: list
   :param error: The relevant error, if the status is FAILED.
   :type error: str
   :param batchStreamingUpdates: A flag marking the feature group deployment as having enabled a background process which caches streamed-in rows for quicker lookup.
   :type batchStreamingUpdates: bool
   :param algorithm: The algorithm that is currently deployed.
   :type algorithm: str
   :param pendingModelVersion: The model that the deployment is switching to, or being stopped.
   :type pendingModelVersion: dict
   :param modelDeploymentConfig: The config for which model to be deployed.
   :type modelDeploymentConfig: dict
   :param predictionOperatorId: The prediction operator ID that is currently deployed.
   :type predictionOperatorId: str
   :param predictionOperatorVersion: The prediction operator version ID that is currently deployed.
   :type predictionOperatorVersion: str
   :param pendingPredictionOperatorVersion: The prediction operator version ID that the deployment is switching to, or being stopped.
   :type pendingPredictionOperatorVersion: str
   :param onlineFeatureGroupId: The online feature group ID that the deployment is running on.
   :type onlineFeatureGroupId: id
   :param outputOnlineFeatureGroupId: The online feature group ID that the deployment is outputting results to.
   :type outputOnlineFeatureGroupId: id
   :param realtimeMonitorId: The ID of the real-time monitor associated with the deployment.
   :type realtimeMonitorId: id
   :param runtimeConfigs: The runtime configurations of a deployment, used by some of the use cases during prediction.
   :type runtimeConfigs: dict
   :param refreshSchedules: A list of refresh schedules that indicate when the deployment will be updated to the latest model version.
   :type refreshSchedules: RefreshSchedule
   :param featureGroupExportConfig: The export config (file connector or database connector information) for feature group deployment exports.
   :type featureGroupExportConfig: FeatureGroupExportConfig
   :param defaultPredictionArguments: The default prediction arguments for prediction APIs
   :type defaultPredictionArguments: PredictionArguments


   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: deployed_at
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: calls_per_second
      :value: None



   .. py:attribute:: auto_deploy
      :value: None



   .. py:attribute:: skip_metrics_check
      :value: None



   .. py:attribute:: algo_name
      :value: None



   .. py:attribute:: regions
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: batch_streaming_updates
      :value: None



   .. py:attribute:: algorithm
      :value: None



   .. py:attribute:: pending_model_version
      :value: None



   .. py:attribute:: model_deployment_config
      :value: None



   .. py:attribute:: prediction_operator_id
      :value: None



   .. py:attribute:: prediction_operator_version
      :value: None



   .. py:attribute:: pending_prediction_operator_version
      :value: None



   .. py:attribute:: online_feature_group_id
      :value: None



   .. py:attribute:: output_online_feature_group_id
      :value: None



   .. py:attribute:: realtime_monitor_id
      :value: None



   .. py:attribute:: runtime_configs
      :value: None



   .. py:attribute:: refresh_schedules


   .. py:attribute:: feature_group_export_config


   .. py:attribute:: default_prediction_arguments


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: create_webhook(endpoint, webhook_event_type, payload_template = None)

      Create a webhook attached to a given deployment ID.

      :param endpoint: URI that the webhook will send HTTP POST requests to.
      :type endpoint: str
      :param webhook_event_type: One of 'DEPLOYMENT_START', 'DEPLOYMENT_SUCCESS', or 'DEPLOYMENT_FAILED'.
      :type webhook_event_type: str
      :param payload_template: Template for the body of the HTTP POST requests. Defaults to {}.
      :type payload_template: dict

      :returns: The webhook attached to the deployment.
      :rtype: Webhook
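
   A hedged example; the endpoint URL and payload template below are placeholders:

   .. code-block:: python

      # `deployment` is assumed to be a Deployment instance returned by the client.
      webhook = deployment.create_webhook(
          endpoint="https://example.com/hooks/abacus",         # placeholder URL
          webhook_event_type="DEPLOYMENT_SUCCESS",
          payload_template={"deployment_id": "placeholder"},   # placeholder template
      )
      print(webhook)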



   .. py:method:: list_webhooks()

      List all the webhooks attached to a given deployment.

      :param deployment_id: Unique identifier of the target deployment.
      :type deployment_id: str

      :returns: List of the webhooks attached to the given deployment ID.
      :rtype: list[Webhook]



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Deployment



   .. py:method:: describe()

      Retrieves a full description of the specified deployment.

      :param deployment_id: Unique string identifier associated with the deployment.
      :type deployment_id: str

      :returns: Description of the deployment.
      :rtype: Deployment



   .. py:method:: update(description = None, auto_deploy = None, skip_metrics_check = None)

      Updates a deployment's properties.

      :param description: The new description for the deployment.
      :type description: str
      :param auto_deploy: Flag to enable the automatic deployment when a new Model Version finishes training.
      :type auto_deploy: bool
      :param skip_metrics_check: Flag to skip the metric regression check for this deployment. This field is only relevant when auto_deploy is on.
      :type skip_metrics_check: bool



   .. py:method:: rename(name)

      Updates a deployment's name

      :param name: The new deployment name.
      :type name: str



   .. py:method:: set_auto(enable = None)

      Enable or disable auto deployment for the specified deployment.

      When a model is scheduled to retrain, deployments with auto deployment enabled will be marked to automatically promote the new model version. After the newly trained model completes, a check on its metrics in comparison to the currently deployed model version will be performed. If the metrics are comparable or better, the newly trained model version is automatically promoted. If not, it will be marked as a failed model version promotion with an error indicating poor metrics performance.


      :param enable: Enable or disable the autoDeploy property of the deployment.
      :type enable: bool



   .. py:method:: set_model_version(model_version, algorithm = None, model_deployment_config = None)

      Promotes a model version and/or algorithm to be the active served deployment version

      :param model_version: A unique identifier for the model version.
      :type model_version: str
      :param algorithm: The algorithm to use for the model version. If not specified, the algorithm will be inferred from the model version.
      :type algorithm: str
      :param model_deployment_config: The deployment configuration for the model to deploy.
      :type model_deployment_config: dict



   .. py:method:: set_feature_group_version(feature_group_version)

      Promotes a feature group version to be served in the deployment.

      :param feature_group_version: Unique string identifier for the feature group version.
      :type feature_group_version: str



   .. py:method:: set_prediction_operator_version(prediction_operator_version)

      Promotes a prediction operator version to be served in the deployment.

      :param prediction_operator_version: Unique string identifier for the prediction operator version.
      :type prediction_operator_version: str



   .. py:method:: start()

      Restarts the specified deployment that was previously suspended.

      :param deployment_id: A unique string identifier associated with the deployment.
      :type deployment_id: str



   .. py:method:: stop()

      Stops the specified deployment.

      :param deployment_id: Unique string identifier of the deployment to be stopped.
      :type deployment_id: str



   .. py:method:: delete()

      Deletes the specified deployment. The deployment's models will not be affected. Note that the deployments are not recoverable after they are deleted.

      :param deployment_id: Unique string identifier of the deployment to delete.
      :type deployment_id: str



   .. py:method:: set_feature_group_export_file_connector_output(file_format = None, output_location = None)

      Sets the export output for the Feature Group Deployment to be a file connector.

      :param file_format: The type of export output, either CSV or JSON.
      :type file_format: str
      :param output_location: The file connector (cloud) location where the output should be exported.
      :type output_location: str



   .. py:method:: set_feature_group_export_database_connector_output(database_connector_id, object_name, write_mode, database_feature_mapping, id_column = None, additional_id_columns = None)

      Sets the export output for the Feature Group Deployment to a Database connector.

      :param database_connector_id: The unique string identifier of the database connector used.
      :type database_connector_id: str
      :param object_name: The object of the database connector to write to.
      :type object_name: str
      :param write_mode: The write mode to use when writing to the database connector, either UPSERT or INSERT.
      :type write_mode: str
      :param database_feature_mapping: The column/feature pairs mapping the features to the database columns.
      :type database_feature_mapping: dict
      :param id_column: The id column to use as the upsert key.
      :type id_column: str
      :param additional_id_columns: For database connectors which support it, a list of additional ID columns to use as a complex key for upserting.
      :type additional_id_columns: list
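
      A minimal usage sketch; the connector id, table name, and column mapping below are illustrative:

      .. code-block:: python

         deployment.set_feature_group_export_database_connector_output(
             database_connector_id='database_connector_id',
             object_name='PREDICTIONS',
             write_mode='UPSERT',
             database_feature_mapping={'user_id': 'USER_ID', 'score': 'SCORE'},
             id_column='USER_ID',
         )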



   .. py:method:: remove_feature_group_export_output()

      Removes the export type that is set for the Feature Group Deployment

      :param deployment_id: The ID of the deployment for which the export type is set.
      :type deployment_id: str



   .. py:method:: set_default_prediction_arguments(prediction_arguments, set_as_override = False)

      Sets the default prediction arguments for the deployment.

      :param prediction_arguments: The prediction arguments to set.
      :type prediction_arguments: PredictionArguments
      :param set_as_override: If True, use these arguments as overrides instead of defaults for predict calls
      :type set_as_override: bool

      :returns: description of the updated deployment.
      :rtype: Deployment



   .. py:method:: get_prediction_logs_records(limit = 10, last_log_request_id = '', last_log_timestamp = None)

      Retrieves the prediction request IDs for the most recent predictions made to the deployment.

      :param limit: The number of prediction log entries to retrieve up to the specified limit.
      :type limit: int
      :param last_log_request_id: The request ID of the last log entry to retrieve.
      :type last_log_request_id: str
      :param last_log_timestamp: A Unix timestamp in milliseconds specifying the timestamp for the last log entry.
      :type last_log_timestamp: int

      :returns: A list of prediction log records.
      :rtype: list[PredictionLogRecord]
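
      A minimal usage sketch, assuming ``deployment`` is an existing ``Deployment`` instance:

      .. code-block:: python

         # Fetch the 25 most recent prediction log records and print them
         for record in deployment.get_prediction_logs_records(limit=25):
             print(record)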



   .. py:method:: create_alert(alert_name, condition_config, action_config)

      Create a deployment alert for the given conditions.

      Only batch prediction usage is supported at this time.


      :param alert_name: Name of the alert.
      :type alert_name: str
      :param condition_config: Condition to run the actions for the alert.
      :type condition_config: AlertConditionConfig
      :param action_config: Configuration for the action of the alert.
      :type action_config: AlertActionConfig

      :returns: Object describing the deployment alert.
      :rtype: MonitorAlert



   .. py:method:: list_alerts()

      List the monitor alerts associated with the deployment id.

      :param deployment_id: Unique string identifier for the deployment.
      :type deployment_id: str

      :returns: An array of deployment alerts.
      :rtype: list[MonitorAlert]



   .. py:method:: create_realtime_monitor(realtime_monitor_schedule = None, lookback_time = None)

      Creates a real-time monitor, which computes and monitors metrics of real-time prediction data.

      :param realtime_monitor_schedule: The cron expression for triggering monitor.
      :type realtime_monitor_schedule: str
      :param lookback_time: Lookback time (in seconds) for each monitor trigger
      :type lookback_time: int

      :returns: Object describing the real-time monitor.
      :rtype: RealtimeMonitor



   .. py:method:: get_conversation_response(message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)

      Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).

      :param message: A message from the user
      :type message: str
      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user-supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation id.
      :type external_session_id: str
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param doc_infos: An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from the docstore.
      :type doc_infos: list
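
      A minimal usage sketch; the deployment token and message are illustrative and ``deployment`` is assumed to be an existing ChatLLM ``Deployment`` instance:

      .. code-block:: python

         # Start a new conversation (no deployment_conversation_id supplied)
         response = deployment.get_conversation_response(
             message="Summarize last week's support tickets.",
             deployment_token='deployment_token',
             temperature=0.0,
         )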



   .. py:method:: get_conversation_response_with_binary_data(deployment_token, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, attachments = None)

      Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).

      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param message: A message from the user
      :type message: str
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user-supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation id.
      :type external_session_id: str
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param attachments: A dictionary of binary data to use to answer the queries.
      :type attachments: dict



   .. py:method:: create_batch_prediction(table_name = None, name = None, global_prediction_args = None, batch_prediction_args = None, explanations = False, output_format = None, output_location = None, database_connector_id = None, database_output_config = None, refresh_schedule = None, csv_input_prefix = None, csv_prediction_prefix = None, csv_explanations_prefix = None, output_includes_metadata = None, result_input_columns = None, input_feature_groups = None)

      Creates a batch prediction job description for the given deployment.

      :param table_name: Name of the feature group table to write the results of the batch prediction. Can only be specified if outputLocation and databaseConnectorId are not specified. If tableName is specified, the outputType will be enforced as CSV.
      :type table_name: str
      :param name: Name of the batch prediction job.
      :type name: str
      :param batch_prediction_args: Batch Prediction args specific to problem type.
      :type batch_prediction_args: BatchPredictionArgs
      :param output_format: Format of the batch prediction output (CSV or JSON).
      :type output_format: str
      :param output_location: Location to write the prediction results. Otherwise, results will be stored in Abacus.AI.
      :type output_location: str
      :param database_connector_id: Unique identifier of a Database Connection to write predictions to. Cannot be specified in conjunction with outputLocation.
      :type database_connector_id: str
      :param database_output_config: Key-value pair of columns/values to write to the database connector. Only available if databaseConnectorId is specified.
      :type database_output_config: dict
      :param refresh_schedule: Cron-style string that describes a schedule in UTC to automatically run the batch prediction.
      :type refresh_schedule: str
      :param csv_input_prefix: Prefix to prepend to the input columns, only applies when output format is CSV.
      :type csv_input_prefix: str
      :param csv_prediction_prefix: Prefix to prepend to the prediction columns, only applies when output format is CSV.
      :type csv_prediction_prefix: str
      :param csv_explanations_prefix: Prefix to prepend to the explanation columns, only applies when output format is CSV.
      :type csv_explanations_prefix: str
      :param output_includes_metadata: If true, output will contain columns including prediction start time, batch prediction version, and model version.
      :type output_includes_metadata: bool
      :param result_input_columns: If present, will limit result files or feature groups to only include columns present in this list.
      :type result_input_columns: list
      :param input_feature_groups: A dict of {'<feature_group_type>': '<feature_group_id>'} which overrides the default input data of that type for the Batch Prediction. Default input data is the training data that was used for training the deployed model.
      :type input_feature_groups: dict

      :returns: The batch prediction description.
      :rtype: BatchPrediction
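
      A minimal usage sketch; the job name, output location, and schedule below are illustrative:

      .. code-block:: python

         batch_prediction = deployment.create_batch_prediction(
             name='nightly_scoring',
             output_format='CSV',
             output_location='s3://example-bucket/predictions/',
             refresh_schedule='0 4 * * *',  # every day at 04:00 UTC
         )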



   .. py:method:: get_statistics_over_time(start_date, end_date)

      Return basic access statistics for the given window

      :param start_date: Timeline start date in ISO format.
      :type start_date: str
      :param end_date: Timeline end date in ISO format. The date range must be 7 days or less.
      :type end_date: str

      :returns: Object describing Time series data of the number of requests and latency over the specified time period.
      :rtype: DeploymentStatistics
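
      A minimal usage sketch; the ISO dates are illustrative and must span 7 days or less:

      .. code-block:: python

         stats = deployment.get_statistics_over_time(
             start_date='2024-01-01T00:00:00+00:00',
             end_date='2024-01-07T00:00:00+00:00',
         )
         print(stats.request_series, stats.latency_series)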



   .. py:method:: describe_feature_group_row_process_by_key(primary_key_value)

      Gets the feature group row process.

      :param primary_key_value: The primary key value
      :type primary_key_value: str

      :returns: An object representing the feature group row process
      :rtype: FeatureGroupRowProcess



   .. py:method:: list_feature_group_row_processes(limit = None, status = None)

      Gets a list of feature group row processes.

      :param limit: The maximum number of processes to return. Defaults to None.
      :type limit: int
      :param status: The status of the processes to return. Defaults to None.
      :type status: str

      :returns: A list of objects representing feature group row processes
      :rtype: list[FeatureGroupRowProcess]



   .. py:method:: get_feature_group_row_process_summary()

      Gets a summary of the statuses of the individual feature group processes.

      :param deployment_id: The deployment id for the process
      :type deployment_id: str

      :returns: An object representing the summary of the statuses of the individual feature group processes
      :rtype: FeatureGroupRowProcessSummary



   .. py:method:: reset_feature_group_row_process_by_key(primary_key_value)

      Resets a feature group row process so that it can be reprocessed

      :param primary_key_value: The primary key value
      :type primary_key_value: str

      :returns: An object representing the feature group row process.
      :rtype: FeatureGroupRowProcess



   .. py:method:: get_feature_group_row_process_logs_by_key(primary_key_value)

      Gets the logs for a feature group row process

      :param primary_key_value: The primary key value
      :type primary_key_value: str

      :returns: An object representing the logs for the feature group row process
      :rtype: FeatureGroupRowProcessLogs



   .. py:method:: create_conversation(name = None, external_application_id = None)

      Creates a deployment conversation.

      :param name: The name of the conversation.
      :type name: str
      :param external_application_id: The external application id associated with the deployment conversation.
      :type external_application_id: str

      :returns: The deployment conversation.
      :rtype: DeploymentConversation



   .. py:method:: list_conversations(external_application_id = None, conversation_type = None, fetch_last_llm_info = False, limit = None, search = None)

      Lists all conversations for the given deployment and current user.

      :param external_application_id: The external application id associated with the deployment conversation. If specified, only conversations created on that application will be listed.
      :type external_application_id: str
      :param conversation_type: The type of the conversation indicating its origin.
      :type conversation_type: DeploymentConversationType
      :param fetch_last_llm_info: If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
      :type fetch_last_llm_info: bool
      :param limit: The number of conversations to return. Defaults to 600.
      :type limit: int
      :param search: The search query to filter conversations by title.
      :type search: str

      :returns: The deployment conversations.
      :rtype: list[DeploymentConversation]



   .. py:method:: create_external_application(name = None, description = None, logo = None, theme = None)

      Creates a new External Application from an existing ChatLLM Deployment.

      :param name: The name of the External Application. If not provided, the name of the deployment will be used.
      :type name: str
      :param description: The description of the External Application. This will be shown to users when they access the External Application. If not provided, the description of the deployment will be used.
      :type description: str
      :param logo: The logo to be displayed.
      :type logo: str
      :param theme: The visual theme of the External Application.
      :type theme: dict

      :returns: The newly created External Application.
      :rtype: ExternalApplication



   .. py:method:: download_agent_attachment(attachment_id)

      Return an agent attachment.

      :param attachment_id: The attachment ID.
      :type attachment_id: str



   .. py:method:: wait_for_deployment(wait_states={'PENDING', 'DEPLOYING'}, timeout=900)

      A waiting call until deployment is completed.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int



   .. py:method:: wait_for_pending_deployment_update(timeout=900)

      A waiting call until the deployment is in a stable state, i.e. the pending model switch has completed and the previous model has stopped.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int

      :returns: the latest deployment object.
      :rtype: Deployment



   .. py:method:: get_status()

      Gets the status of the deployment.

      :returns: A string describing the status of the deployment (pending, deploying, active, etc.).
      :rtype: str
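
      A minimal usage sketch combining this call with ``wait_for_deployment``, assuming ``deployment`` is an existing ``Deployment`` instance:

      .. code-block:: python

         # Block until the deployment leaves the PENDING/DEPLOYING states, then inspect the status
         deployment.wait_for_deployment(timeout=900)
         print(deployment.get_status())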



   .. py:method:: create_refresh_policy(cron)

      Creates a refresh policy for the deployment.

      :param cron: A cron style string to set the refresh time.
      :type cron: str

      :returns: The refresh policy object.
      :rtype: RefreshPolicy



   .. py:method:: list_refresh_policies()

      Lists the refresh policies associated with the deployment.

      :returns: A list of refresh policy objects.
      :rtype: List[RefreshPolicy]



.. py:class:: DeploymentAuthToken(client, deploymentToken=None, createdAt=None, name=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A deployment authentication token that is used to authenticate prediction requests

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param deploymentToken: The unique token used to authenticate requests.
   :type deploymentToken: str
   :param createdAt: The date and time when the token was created, in ISO-8601 format.
   :type createdAt: str
   :param name: The name associated with the authentication token.
   :type name: str


   .. py:attribute:: deployment_token
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DeploymentConversation(client, deploymentConversationId=None, name=None, deploymentId=None, createdAt=None, lastEventCreatedAt=None, hasHistory=None, externalSessionId=None, regenerateAttempt=None, externalApplicationId=None, unusedDocumentUploadIds=None, humanizeInstructions=None, conversationWarning=None, conversationType=None, metadata=None, llmDisplayName=None, llmBotIcon=None, searchSuggestions=None, chatllmTaskId=None, history={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A deployment conversation.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param deploymentConversationId: The unique identifier of the deployment conversation.
   :type deploymentConversationId: str
   :param name: The name of the deployment conversation.
   :type name: str
   :param deploymentId: The deployment id associated with the deployment conversation.
   :type deploymentId: str
   :param createdAt: The timestamp at which the deployment conversation was created.
   :type createdAt: str
   :param lastEventCreatedAt: The timestamp at which the most recent deployment conversation event was created.
   :type lastEventCreatedAt: str
   :param hasHistory: Whether the deployment conversation has any history.
   :type hasHistory: bool
   :param externalSessionId: The external session id associated with the deployment conversation.
   :type externalSessionId: str
   :param regenerateAttempt: The sequence number of regeneration. Not regenerated if 0.
   :type regenerateAttempt: int
   :param externalApplicationId: The external application id associated with the deployment conversation.
   :type externalApplicationId: str
   :param unusedDocumentUploadIds: The list of unused document upload ids associated with the deployment conversation.
   :type unusedDocumentUploadIds: list[str]
   :param humanizeInstructions: Instructions for humanizing the conversation.
   :type humanizeInstructions: dict
   :param conversationWarning: Extra text associated with the deployment conversation (to show it at the bottom of chatbot).
   :type conversationWarning: str
   :param conversationType: The type of the conversation, which depicts the application it caters to.
   :type conversationType: str
   :param metadata: Additional backend information about the conversation.
   :type metadata: dict
   :param llmDisplayName: The display name of the LLM model used to generate the most recent response. Only used for system-created bots.
   :type llmDisplayName: str
   :param llmBotIcon: The icon location of the LLM model used to generate the most recent response. Only used for system-created bots.
   :type llmBotIcon: str
   :param searchSuggestions: The list of search suggestions for the conversation.
   :type searchSuggestions: list
   :param chatllmTaskId: The chatllm task id associated with the deployment conversation.
   :type chatllmTaskId: str
   :param history: The history of the deployment conversation.
   :type history: DeploymentConversationEvent


   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: last_event_created_at
      :value: None



   .. py:attribute:: has_history
      :value: None



   .. py:attribute:: external_session_id
      :value: None



   .. py:attribute:: regenerate_attempt
      :value: None



   .. py:attribute:: external_application_id
      :value: None



   .. py:attribute:: unused_document_upload_ids
      :value: None



   .. py:attribute:: humanize_instructions
      :value: None



   .. py:attribute:: conversation_warning
      :value: None



   .. py:attribute:: conversation_type
      :value: None



   .. py:attribute:: metadata
      :value: None



   .. py:attribute:: llm_display_name
      :value: None



   .. py:attribute:: llm_bot_icon
      :value: None



   .. py:attribute:: search_suggestions
      :value: None



   .. py:attribute:: chatllm_task_id
      :value: None



   .. py:attribute:: history


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: get(external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False)

      Gets a deployment conversation.

      :param external_session_id: External session ID of the conversation.
      :type external_session_id: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str
      :param filter_intermediate_conversation_events: If true, intermediate conversation events will be filtered out. Default is true.
      :type filter_intermediate_conversation_events: bool
      :param get_unused_document_uploads: If true, unused document uploads will be returned. Default is false.
      :type get_unused_document_uploads: bool

      :returns: The deployment conversation.
      :rtype: DeploymentConversation



   .. py:method:: delete(deployment_id = None)

      Delete a Deployment Conversation.

      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str



   .. py:method:: clear(external_session_id = None, deployment_id = None, user_message_indices = None)

      Clear the message history of a Deployment Conversation.

      :param external_session_id: The external session id associated with the deployment conversation.
      :type external_session_id: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str
      :param user_message_indices: Optional list of user message indices to clear. The associated bot response will also be cleared. If not provided, all messages will be cleared.
      :type user_message_indices: list



   .. py:method:: set_feedback(message_index, is_useful = None, is_not_useful = None, feedback = None, feedback_type = None, deployment_id = None)

      Sets a deployment conversation message as useful or not useful

      :param message_index: The index of the deployment conversation message
      :type message_index: int
      :param is_useful: If true, mark the message as useful; if false, clear the useful flag.
      :type is_useful: bool
      :param is_not_useful: If true, mark the message as not useful; if false, clear the not-useful flag.
      :type is_not_useful: bool
      :param feedback: Optional feedback on why the message is useful or not useful
      :type feedback: str
      :param feedback_type: Optional feedback type
      :type feedback_type: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str
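
      A minimal usage sketch, assuming ``conversation`` is an existing ``DeploymentConversation`` instance (the message index and feedback text are illustrative):

      .. code-block:: python

         # Mark the bot response at message index 1 as useful
         conversation.set_feedback(
             message_index=1,
             is_useful=True,
             feedback='Accurate and concise answer.',
         )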



   .. py:method:: rename(name, deployment_id = None)

      Rename a Deployment Conversation.

      :param name: The new name of the conversation.
      :type name: str
      :param deployment_id: The deployment this conversation belongs to. This is required if not logged in.
      :type deployment_id: str



   .. py:method:: export(external_session_id = None)

      Export a Deployment Conversation.

      :param external_session_id: The external session id associated with the deployment conversation. One of deployment_conversation_id or external_session_id must be provided.
      :type external_session_id: str

      :returns: The deployment conversation html export.
      :rtype: DeploymentConversationExport
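
      A minimal usage sketch, assuming ``conversation`` is an existing ``DeploymentConversation`` instance; the output file name is illustrative:

      .. code-block:: python

         export = conversation.export()
         with open('conversation.html', 'w') as f:
             f.write(export.conversation_export_html)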



   .. py:method:: construct_agent_conversation_messages_for_llm(external_session_id = None, include_document_contents = True)

      Returns conversation history in a format for LLM calls.

      :param external_session_id: External session ID of the conversation.
      :type external_session_id: str
      :param include_document_contents: If true, include contents from uploaded documents in the generated messages.
      :type include_document_contents: bool

      :returns: Contains a list of AgentConversationMessage that represents the conversation.
      :rtype: AgentConversation



.. py:class:: DeploymentConversationEvent(client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeId=None, nextAgentWorkflowNodeId=None, chatType=None, agentResponse=None, error=None, segments=None, streamedData=None, streamedSectionData=None, highlights=None, llmDisplayName=None, llmBotIcon=None, formResponse=None, routedLlm=None, computePointsUsed=None, computerFiles=None, toolUseRequest=None, verificationSummary=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A single deployment conversation message.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param role: The role of the message sender
   :type role: str
   :param text: The text of the message
   :type text: str
   :param timestamp: The timestamp at which the message was sent
   :type timestamp: str
   :param messageIndex: The index of the message in the conversation
   :type messageIndex: int
   :param regenerateAttempt: The sequence number of regeneration. Not regenerated if 0.
   :type regenerateAttempt: int
   :param modelVersion: The model instance id associated with the message.
   :type modelVersion: str
   :param searchResults: The search results for the message.
   :type searchResults: dict
   :param isUseful: Whether this message was marked as useful or not
   :type isUseful: bool
   :param feedback: The feedback provided for the message
   :type feedback: str
   :param feedbackType: The type of feedback provided for the message
   :type feedbackType: str
   :param docInfos: A list of information on the documents associated with the message
   :type docInfos: list
   :param keywordArguments: User message only. A dictionary of keyword arguments used to generate response.
   :type keywordArguments: dict
   :param inputParams: User message only. A dictionary of input parameters used to generate response.
   :type inputParams: dict
   :param attachments: A list of attachments associated with the message.
   :type attachments: list
   :param responseVersion: The version of the response, used to differentiate w/ legacy agent response.
   :type responseVersion: str
   :param agentWorkflowNodeId: The workflow node id associated with the agent response.
   :type agentWorkflowNodeId: str
   :param nextAgentWorkflowNodeId: The id of the workflow node to be executed next.
   :type nextAgentWorkflowNodeId: str
   :param chatType: The type of chat llm that was run for the message.
   :type chatType: str
   :param agentResponse: Response from the agent. Only for conversation with agents.
   :type agentResponse: dict
   :param error: The error message in case of an error.
   :type error: str
   :param segments: The segments of the message.
   :type segments: list
   :param streamedData: Aggregated streamed messages from the agent.
   :type streamedData: str
   :param streamedSectionData: Aggregated streamed section outputs from the agent in a list.
   :type streamedSectionData: str
   :param highlights: Chunks with bounding boxes for highlighting the result sources.
   :type highlights: dict
   :param llmDisplayName: The display name of the LLM model used to generate the response. Only used for system-created bots.
   :type llmDisplayName: str
   :param llmBotIcon: The icon location of the LLM model used to generate the response. Only used for system-created bots.
   :type llmBotIcon: str
   :param formResponse: Contains form data response from the user when a Form Segment is given out by the bot.
   :type formResponse: dict
   :param routedLlm: The LLM that was chosen by RouteLLM to generate the response.
   :type routedLlm: str
   :param computePointsUsed: The number of compute points used for the message.
   :type computePointsUsed: int
   :param computerFiles: The list of files that were created by the computer agent.
   :type computerFiles: list
   :param toolUseRequest: The tool use request for the message.
   :type toolUseRequest: dict
   :param verificationSummary: The summary of the verification process for the message.
   :type verificationSummary: str


   .. py:attribute:: role
      :value: None



   .. py:attribute:: text
      :value: None



   .. py:attribute:: timestamp
      :value: None



   .. py:attribute:: message_index
      :value: None



   .. py:attribute:: regenerate_attempt
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: search_results
      :value: None



   .. py:attribute:: is_useful
      :value: None



   .. py:attribute:: feedback
      :value: None



   .. py:attribute:: feedback_type
      :value: None



   .. py:attribute:: doc_infos
      :value: None



   .. py:attribute:: keyword_arguments
      :value: None



   .. py:attribute:: input_params
      :value: None



   .. py:attribute:: attachments
      :value: None



   .. py:attribute:: response_version
      :value: None



   .. py:attribute:: agent_workflow_node_id
      :value: None



   .. py:attribute:: next_agent_workflow_node_id
      :value: None



   .. py:attribute:: chat_type
      :value: None



   .. py:attribute:: agent_response
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: segments
      :value: None



   .. py:attribute:: streamed_data
      :value: None



   .. py:attribute:: streamed_section_data
      :value: None



   .. py:attribute:: highlights
      :value: None



   .. py:attribute:: llm_display_name
      :value: None



   .. py:attribute:: llm_bot_icon
      :value: None



   .. py:attribute:: form_response
      :value: None



   .. py:attribute:: routed_llm
      :value: None



   .. py:attribute:: compute_points_used
      :value: None



   .. py:attribute:: computer_files
      :value: None



   .. py:attribute:: tool_use_request
      :value: None



   .. py:attribute:: verification_summary
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DeploymentConversationExport(client, deploymentConversationId=None, conversationExportHtml=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A deployment conversation html export, to be used for downloading the conversation.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param deploymentConversationId: The unique identifier of the deployment conversation.
   :type deploymentConversationId: str
   :param conversationExportHtml: The html string of the deployment conversation.
   :type conversationExportHtml: str


   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: conversation_export_html
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DeploymentStatistics(client, requestSeries=None, latencySeries=None, dateLabels=None, httpStatusSeries=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A set of statistics for a realtime deployment.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param requestSeries: A list of the number of requests per second.
   :type requestSeries: list
   :param latencySeries: A list of the latency in milliseconds for each request.
   :type latencySeries: list
   :param dateLabels: A list of date labels for each point in the series.
   :type dateLabels: list
   :param httpStatusSeries: A list of the HTTP status codes for each request.
   :type httpStatusSeries: list


   .. py:attribute:: request_series
      :value: None



   .. py:attribute:: latency_series
      :value: None



   .. py:attribute:: date_labels
      :value: None



   .. py:attribute:: http_status_series
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DocumentData(client, docId=None, mimeType=None, pageCount=None, totalPageCount=None, extractedText=None, embeddedText=None, pages=None, tokens=None, metadata=None, pageMarkdown=None, extractedPageText=None, augmentedPageText=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Data extracted from a docstore document.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param docId: Unique Docstore string identifier for the document.
   :type docId: str
   :param mimeType: The mime type of the document.
   :type mimeType: str
   :param pageCount: The number of pages for which the data is available. This is generally the same as the total number of pages, but may be less if processing is done only for selected pages.
   :type pageCount: int
   :param totalPageCount: The total number of pages in the document.
   :type totalPageCount: int
   :param extractedText: The extracted text in the document obtained from OCR.
   :type extractedText: str
   :param embeddedText: The embedded text in the document. Only available for digital documents.
   :type embeddedText: str
   :param pages: List of embedded text for each page in the document. Only available for digital documents.
   :type pages: list
   :param tokens: List of extracted tokens in the document obtained from OCR.
   :type tokens: list
   :param metadata: List of metadata for each page in the document.
   :type metadata: list
   :param pageMarkdown: The markdown text for the page.
   :type pageMarkdown: list
   :param extractedPageText: List of extracted text for each page in the document obtained from OCR. Available when return_extracted_page_text parameter is set to True in the document data retrieval API.
   :type extractedPageText: list
   :param augmentedPageText: List of extracted text for each page in the document obtained from OCR augmented with embedded links in the document.
   :type augmentedPageText: list


   .. py:attribute:: doc_id
      :value: None



   .. py:attribute:: mime_type
      :value: None



   .. py:attribute:: page_count
      :value: None



   .. py:attribute:: total_page_count
      :value: None



   .. py:attribute:: extracted_text
      :value: None



   .. py:attribute:: embedded_text
      :value: None



   .. py:attribute:: pages
      :value: None



   .. py:attribute:: tokens
      :value: None



   .. py:attribute:: metadata
      :value: None



   .. py:attribute:: page_markdown
      :value: None



   .. py:attribute:: extracted_page_text
      :value: None



   .. py:attribute:: augmented_page_text
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DocumentRetriever(client, name=None, documentRetrieverId=None, createdAt=None, featureGroupId=None, featureGroupName=None, indexingRequired=None, latestDocumentRetrieverVersion={}, documentRetrieverConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A vector store that stores embeddings for a list of document chunks.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the document retriever.
   :type name: str
   :param documentRetrieverId: The unique identifier of the vector store.
   :type documentRetrieverId: str
   :param createdAt: When the vector store was created.
   :type createdAt: str
   :param featureGroupId: The feature group id associated with the document retriever.
   :type featureGroupId: str
   :param featureGroupName: The feature group name associated with the document retriever.
   :type featureGroupName: str
   :param indexingRequired: Whether the document retriever is required to be indexed due to changes in underlying data.
   :type indexingRequired: bool
   :param latestDocumentRetrieverVersion: The latest version of vector store.
   :type latestDocumentRetrieverVersion: DocumentRetrieverVersion
   :param documentRetrieverConfig: The config for vector store creation.
   :type documentRetrieverConfig: VectorStoreConfig


   .. py:attribute:: name
      :value: None



   .. py:attribute:: document_retriever_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: feature_group_name
      :value: None



   .. py:attribute:: indexing_required
      :value: None



   .. py:attribute:: latest_document_retriever_version


   .. py:attribute:: document_retriever_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: rename(name)

      Updates an existing document retriever.

      :param name: The name to update the document retriever with.
      :type name: str

      :returns: The updated document retriever.
      :rtype: DocumentRetriever



   .. py:method:: create_version(feature_group_id = None, document_retriever_config = None)

      Creates a document retriever version from the latest version of the feature group that the document retriever is associated with.

      :param feature_group_id: The ID of the feature group to update the document retriever with.
      :type feature_group_id: str
      :param document_retriever_config: The configuration, including chunk_size and chunk_overlap_fraction, for document retrieval.
      :type document_retriever_config: VectorStoreConfig

      :returns: The newly created document retriever version.
      :rtype: DocumentRetrieverVersion
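
      A minimal usage sketch, assuming ``retriever`` is an existing ``DocumentRetriever`` instance:

      .. code-block:: python

         # Re-index against the latest feature group version and wait for indexing to finish
         version = retriever.create_version()
         version.wait_for_results()
         print(version.get_status())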



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: DocumentRetriever



   .. py:method:: describe()

      Describe a Document Retriever.

      :param document_retriever_id: A unique string identifier associated with the document retriever.
      :type document_retriever_id: str

      :returns: The document retriever object.
      :rtype: DocumentRetriever



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      List all the document retriever versions with a given ID.

      :param limit: The number of vector store versions to retrieve. The maximum value is 100.
      :type limit: int
      :param start_after_version: An offset parameter to exclude all document retriever versions up to this specified one.
      :type start_after_version: str

      :returns: All the document retriever versions associated with the document retriever.
      :rtype: list[DocumentRetrieverVersion]



   .. py:method:: get_document_snippet(document_id, start_word_index = None, end_word_index = None)

      Get a snippet from documents in the document retriever.

      :param document_id: The ID of the document to retrieve the snippet from.
      :type document_id: str
      :param start_word_index: If provided, will start the snippet at the index (of words in the document) specified.
      :type start_word_index: int
      :param end_word_index: If provided, will end the snippet at the index (of words in the document) specified.
      :type end_word_index: int

      :returns: The documentation snippet found from the document retriever.
      :rtype: DocumentRetrieverLookupResult



   .. py:method:: restart()

      Restart the document retriever if it is stopped or has failed. This will start the deployment of the document retriever,

      but will not wait for it to be ready. You need to call wait_until_ready to wait until the deployment is ready.


      :param document_retriever_id: A unique string identifier associated with the document retriever.
      :type document_retriever_id: str



   .. py:method:: wait_until_ready(timeout = 3600)

      A waiting call until document retriever is ready. It restarts the document retriever if it is stopped.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out. Default value given is 3600 seconds.
      :type timeout: int



   .. py:method:: wait_until_deployment_ready(timeout = 3600)

      A waiting call until the document retriever deployment is ready to serve.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out. Default value given is 3600 seconds.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the document retriever. It represents the indexing status until indexing is complete, and the deployment status thereafter.

      :returns: A string describing the status of a document retriever (pending, indexing, complete, active, etc.).
      :rtype: str



   .. py:method:: get_deployment_status()

      Gets the deployment status of the document retriever.

      :returns: A string describing the deployment status of document retriever (pending, deploying, active, etc.).
      :rtype: str



   .. py:method:: get_matching_documents(query, filters = None, limit = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None, filter_clause = None, crowding_limits = None, include_text_search = False)

      Lookup document retrievers and return the matching documents from the document retriever deployed with given query.

      Original documents are split into chunks and stored in the document retriever. This lookup function will return the relevant chunks
      from the document retriever. The returned chunks could be expanded to include more words from the original documents and merged if they
      are overlapping, and permitted by the settings provided. The returned chunks are sorted by relevance.


      :param query: The query to search for.
      :type query: str
      :param filters: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filters: dict
      :param limit: If provided, will limit the number of results to the value specified.
      :type limit: int
      :param result_columns: If provided, will limit the column properties present in each result to those specified in this list.
      :type result_columns: list
      :param max_words: If provided, will limit the total number of words in the results to the value specified.
      :type max_words: int
      :param num_retrieval_margin_words: If provided, will add this number of words from left and right of the returned chunks.
      :type num_retrieval_margin_words: int
      :param max_words_per_chunk: If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual size of chunk on disk, which is determined during document retriever creation, the actual size of chunk will be used. I.e., chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
      :type max_words_per_chunk: int
      :param score_multiplier_column: If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
      :type score_multiplier_column: str
      :param min_score: If provided, will filter out the results with score lower than the value specified.
      :type min_score: float
      :param required_phrases: If provided, each result will have at least one of the phrases.
      :type required_phrases: list
      :param filter_clause: If provided, filter the results of the query using this sql where clause.
      :type filter_clause: str
      :param crowding_limits: A dictionary mapping metadata columns to the maximum number of results per unique value of the column. This is used to ensure diversity of metadata attribute values in the results. If a particular attribute value has already reached its maximum count, further results with that same attribute value will be excluded from the final result set.
      :type crowding_limits: dict
      :param include_text_search: If true, combine the ranking of results from a BM25 text search over the documents with the vector search using reciprocal rank fusion. It leverages both lexical and semantic matching for better overall results. It's particularly valuable in professional, technical, or specialized fields where both precision in terminology and understanding of context are important.
      :type include_text_search: bool

      :returns: The relevant documentation results found from the document retriever.
      :rtype: list[DocumentRetrieverLookupResult]
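
      A minimal usage sketch, assuming ``retriever`` is an existing, deployed ``DocumentRetriever`` instance (the query and thresholds are illustrative):

      .. code-block:: python

         results = retriever.get_matching_documents(
             query='How do I rotate my API keys?',
             limit=5,
             min_score=0.3,
             include_text_search=True,
         )
         for result in results:
             print(result.score, result.document_source)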



.. py:class:: DocumentRetrieverLookupResult(client, document=None, score=None, properties=None, pages=None, boundingBoxes=None, documentSource=None, imageIds=None, metadata=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Result of a document retriever lookup.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param document: The document that was looked up.
   :type document: str
   :param score: Score of the document with respect to the query.
   :type score: float
   :param properties: Properties of the retrieved documents.
   :type properties: dict
   :param pages: Pages of the retrieved text from the original document.
   :type pages: list
   :param boundingBoxes: Bounding boxes of the retrieved text from the original document.
   :type boundingBoxes: list
   :param documentSource: Document source name.
   :type documentSource: str
   :param imageIds: List of Image IDs for all the pages.
   :type imageIds: list
   :param metadata: Metadata column values for the retrieved documents.
   :type metadata: dict


   .. py:attribute:: document
      :value: None



   .. py:attribute:: score
      :value: None



   .. py:attribute:: properties
      :value: None



   .. py:attribute:: pages
      :value: None



   .. py:attribute:: bounding_boxes
      :value: None



   .. py:attribute:: document_source
      :value: None



   .. py:attribute:: image_ids
      :value: None



   .. py:attribute:: metadata
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DocumentRetrieverVersion(client, documentRetrieverId=None, documentRetrieverVersion=None, createdAt=None, status=None, deploymentStatus=None, featureGroupId=None, featureGroupVersion=None, error=None, numberOfChunks=None, embeddingFileSize=None, warnings=None, resolvedConfig={}, documentRetrieverConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of document retriever.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param documentRetrieverId: The unique identifier of the Document Retriever.
   :type documentRetrieverId: str
   :param documentRetrieverVersion: The unique identifier of the Document Retriever version.
   :type documentRetrieverVersion: str
   :param createdAt: When the Document Retriever was created.
   :type createdAt: str
   :param status: The status of the Document Retriever version. It represents the indexing status until indexing is complete, and the deployment status thereafter.
   :type status: str
   :param deploymentStatus: The status of deploying the Document Retriever version.
   :type deploymentStatus: str
   :param featureGroupId: The feature group id associated with the document retriever.
   :type featureGroupId: str
   :param featureGroupVersion: The unique identifier of the feature group version at which the Document Retriever version is created.
   :type featureGroupVersion: str
   :param error: The error message when it failed to create the document retriever version.
   :type error: str
   :param numberOfChunks: The number of chunks for the document retriever.
   :type numberOfChunks: int
   :param embeddingFileSize: The size of embedding file for the document retriever.
   :type embeddingFileSize: int
   :param warnings: The warning messages when creating the document retriever.
   :type warnings: list
   :param resolvedConfig: The resolved configurations, such as default settings, for indexing documents.
   :type resolvedConfig: VectorStoreConfig
   :param documentRetrieverConfig: The config used to create the document retriever version.
   :type documentRetrieverConfig: VectorStoreConfig


   .. py:attribute:: document_retriever_id
      :value: None



   .. py:attribute:: document_retriever_version
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: deployment_status
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: number_of_chunks
      :value: None



   .. py:attribute:: embedding_file_size
      :value: None



   .. py:attribute:: warnings
      :value: None



   .. py:attribute:: resolved_config


   .. py:attribute:: document_retriever_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: delete()

      Delete a document retriever version.

      :param document_retriever_version: A unique string identifier associated with the document retriever version.
      :type document_retriever_version: str



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: DocumentRetrieverVersion



   .. py:method:: describe()

      Describe a document retriever version.

      :param document_retriever_version: A unique string identifier associated with the document retriever version.
      :type document_retriever_version: str

      :returns: The document retriever version object.
      :rtype: DocumentRetrieverVersion



   .. py:method:: wait_for_results(timeout=3600)

      A waiting call until document retriever version is complete.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int



   .. py:method:: wait_until_ready(timeout=3600)

      A waiting call until the document retriever version is ready.  It restarts the document retriever if it is stopped.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int



   .. py:method:: wait_until_deployment_ready(timeout = 3600)

      A waiting call until the document retriever deployment is ready to serve.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out. Default value given is 3600 seconds.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the document retriever version.

      :returns: A string describing the status of a document retriever version (pending, complete, etc.).
      :rtype: str



   .. py:method:: get_deployment_status()

      Gets the deployment status of the document retriever version.

      :returns: A string describing the deployment status of a document retriever version (pending, deploying, etc.).
      :rtype: str



.. py:class:: DriftDistribution(client, trainColumn=None, predictedColumn=None, metrics=None, distribution={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   How actuals or predicted values have changed in the training data versus predicted data

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param trainColumn: The feature name in the train table.
   :type trainColumn: str
   :param predictedColumn: The feature name in the prediction table.
   :type predictedColumn: str
   :param metrics: Drift measures.
   :type metrics: dict
   :param distribution: A FeatureDistribution, how the training data compares to the predicted data.
   :type distribution: FeatureDistribution


   .. py:attribute:: train_column
      :value: None



   .. py:attribute:: predicted_column
      :value: None



   .. py:attribute:: metrics
      :value: None



   .. py:attribute:: distribution


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: DriftDistributions(client, labelDrift={}, predictionDrift={}, bpPredictionDrift={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   For either actuals or predicted values, how they have changed in the training data versus some specified window

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param labelDrift: A DriftDistribution describing column names and the range of values for label drift.
   :type labelDrift: DriftDistribution
   :param predictionDrift: A DriftDistribution describing column names and the range of values for prediction drift.
   :type predictionDrift: DriftDistribution
   :param bpPredictionDrift: A DriftDistribution describing column names and the range of values for prediction drift, when the predictions come from BP.
   :type bpPredictionDrift: DriftDistribution
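
   A minimal sketch of inspecting a returned instance (assumes ``drifts`` is a :py:class:`DriftDistributions` returned by a monitoring API call):

   .. code-block:: python

      # Each drift field is itself a DriftDistribution with train/predicted column names.
      label_drift = drifts.label_drift
      prediction_drift = drifts.prediction_drift

      # Convert the whole object (including nested distributions) to plain dicts.
      summary = drifts.to_dict()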


   .. py:attribute:: label_drift


   .. py:attribute:: prediction_drift


   .. py:attribute:: bp_prediction_drift


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Eda(client, edaId=None, name=None, createdAt=None, projectId=None, featureGroupId=None, referenceFeatureGroupVersion=None, testFeatureGroupVersion=None, edaConfigs=None, latestEdaVersion={}, refreshSchedules={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An exploratory data analysis (EDA) object

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param edaId: The unique identifier of the eda object.
   :type edaId: str
   :param name: The user-friendly name for the eda object.
   :type name: str
   :param createdAt: Date and time at which the eda object was created.
   :type createdAt: str
   :param projectId: The project this eda object belongs to.
   :type projectId: str
   :param featureGroupId: Feature group ID for which eda analysis is being done.
   :type featureGroupId: str
   :param referenceFeatureGroupVersion: Reference feature group version used for data consistency analysis; for collinearity analysis this is the latest feature group version.
   :type referenceFeatureGroupVersion: str
   :param testFeatureGroupVersion: Test feature group version used for data consistency analysis; for collinearity analysis this is the latest feature group version.
   :type testFeatureGroupVersion: str
   :param edaConfigs: Configurations for eda object.
   :type edaConfigs: dict
   :param latestEdaVersion: The latest eda object version.
   :type latestEdaVersion: EdaVersion
   :param refreshSchedules: List of refresh schedules that indicate when the next model version will be trained.
   :type refreshSchedules: RefreshSchedule


   .. py:attribute:: eda_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: reference_feature_group_version
      :value: None



   .. py:attribute:: test_feature_group_version
      :value: None



   .. py:attribute:: eda_configs
      :value: None



   .. py:attribute:: latest_eda_version


   .. py:attribute:: refresh_schedules


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: rerun()

      Reruns the specified EDA object.

      :param eda_id: Unique string identifier of the EDA object to rerun.
      :type eda_id: str

      :returns: The EDA object that is being rerun.
      :rtype: Eda



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Eda



   .. py:method:: describe()

      Retrieves a full description of the specified EDA object.

      :param eda_id: Unique string identifier associated with the EDA object.
      :type eda_id: str

      :returns: Description of the EDA object.
      :rtype: Eda



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      Retrieves a list of versions for a given EDA object.

      :param limit: The maximum length of the list of all EDA versions.
      :type limit: int
      :param start_after_version: The ID of the version after which the list starts.
      :type start_after_version: str

      :returns: A list of EDA versions.
      :rtype: list[EdaVersion]
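
      A minimal usage sketch (assumes ``eda`` is an :py:class:`Eda` object retrieved from an earlier API call):

      .. code-block:: python

         # Page through versions, 25 at a time, starting from the most recent.
         versions = eda.list_versions(limit=25)
         for version in versions:
             print(version.eda_version, version.status)

         # Kick off a fresh analysis run on the same EDA configuration.
         eda.rerun()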



   .. py:method:: rename(name)

      Renames an EDA object.

      :param name: The new name to apply to the EDA object.
      :type name: str



   .. py:method:: delete()

      Deletes the specified EDA and all its versions.

      :param eda_id: Unique string identifier of the EDA to delete.
      :type eda_id: str



.. py:class:: EdaChartDescription(client, chartType=None, description=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Eda Chart Description.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param chartType: Name of chart.
   :type chartType: str
   :param description: Description of the eda chart.
   :type description: str


   .. py:attribute:: chart_type
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: EdaCollinearity(client, columnNames=None, collinearityMatrix=None, groupFeatureDict=None, collinearityGroups=None, columnNamesX=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Eda Collinearity of the latest version of the data between all the features.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param columnNames: Name of all the features in the y axis of the collinearity matrix
   :type columnNames: list
   :param collinearityMatrix: A dict describing the collinearity between all the features
   :type collinearityMatrix: dict
   :param groupFeatureDict: A dict describing the index of the group from collinearity_groups a feature exists in
   :type groupFeatureDict: dict
   :param collinearityGroups: Groups created based on a collinearity threshold of 0.7
   :type collinearityGroups: list
   :param columnNamesX: Name of all the features in the x axis of the collinearity matrix
   :type columnNamesX: list


   .. py:attribute:: column_names
      :value: None



   .. py:attribute:: collinearity_matrix
      :value: None



   .. py:attribute:: group_feature_dict
      :value: None



   .. py:attribute:: collinearity_groups
      :value: None



   .. py:attribute:: column_names_x
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: EdaDataConsistency(client, columnNames=None, primaryKeys=None, transformationColumnNames=None, baseDuplicates={}, compareDuplicates={}, deletions={}, transformations={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Eda Data Consistency, containing the duplicates in the base version and comparison version, deletions between the base and comparison, and feature transformations between the base and comparison data.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param columnNames: Name of all the features in the data
   :type columnNames: list
   :param primaryKeys: Name of the primary keys in the data
   :type primaryKeys: list
   :param transformationColumnNames: Name of all the features that are not the primary keys
   :type transformationColumnNames: list
   :param baseDuplicates: A DataConsistencyDuplication describing the number of duplicates within the data
   :type baseDuplicates: DataConsistencyDuplication
   :param compareDuplicates: A DataConsistencyDuplication describing the number of duplicates within the data
   :type compareDuplicates: DataConsistencyDuplication
   :param deletions: A DataConsistencyDeletion describing the number of deletions between two versions of the data
   :type deletions: DataConsistencyDuplication
   :param transformations: A DataConsistencyTransformation describing the number of changes that occurred per feature in the data
   :type transformations: DataConsistencyTransformation


   .. py:attribute:: column_names
      :value: None



   .. py:attribute:: primary_keys
      :value: None



   .. py:attribute:: transformation_column_names
      :value: None



   .. py:attribute:: base_duplicates


   .. py:attribute:: compare_duplicates


   .. py:attribute:: deletions


   .. py:attribute:: transformations


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: EdaFeatureAssociation(client, data=None, isScatter=None, isBoxWhisker=None, xAxis=None, yAxis=None, xAxisColumnValues=None, yAxisColumnValues=None, dataColumns=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Eda Feature Association between two features in the data.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param data: The data to display the feature association between two features
   :type data: dict
   :param isScatter: A Boolean that represents if the data creates a scatter plot (for cases of numerical data vs numerical data)
   :type isScatter: bool
   :param isBoxWhisker: A Boolean that represents if the data creates a box whisker plot (For cases of categorical data vs numerical data and vice versa)
   :type isBoxWhisker: bool
   :param xAxis: Name of the feature selected for feature association (reference_feature_name) for x axis on the plot
   :type xAxis: str
   :param yAxis: Name of the feature selected for feature association (test_feature_name) for y axis on the plot
   :type yAxis: str
   :param xAxisColumnValues: Name of all the categories within the x_axis feature (if it is a categorical data type)
   :type xAxisColumnValues: list
   :param yAxisColumnValues: Name of all the categories within the y_axis feature (if it is a categorical data type)
   :type yAxisColumnValues: list
   :param dataColumns: A list of columns listed in the data as keys
   :type dataColumns: list


   .. py:attribute:: data
      :value: None



   .. py:attribute:: is_scatter
      :value: None



   .. py:attribute:: is_box_whisker
      :value: None



   .. py:attribute:: x_axis
      :value: None



   .. py:attribute:: y_axis
      :value: None



   .. py:attribute:: x_axis_column_values
      :value: None



   .. py:attribute:: y_axis_column_values
      :value: None



   .. py:attribute:: data_columns
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: EdaFeatureCollinearity(client, selectedFeature=None, sortedColumnNames=None, featureCollinearity=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Eda Collinearity of the latest version of the data for a given feature.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param selectedFeature: Selected feature to show the collinearity
   :type selectedFeature: str
   :param sortedColumnNames: Name of all the features in the data sorted in descending order of collinearity value
   :type sortedColumnNames: list
   :param featureCollinearity: A dict describing the collinearity between a given feature and all the features in the data
   :type featureCollinearity: dict


   .. py:attribute:: selected_feature
      :value: None



   .. py:attribute:: sorted_column_names
      :value: None



   .. py:attribute:: feature_collinearity
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: EdaForecastingAnalysis(client, primaryKeys=None, forecastingTargetFeature=None, timestampFeature=None, forecastFrequency=None, salesAcrossTime={}, cummulativeContribution={}, missingValueDistribution={}, historyLength={}, numRowsHistogram={}, productMaturity={}, seasonalityYear={}, seasonalityMonth={}, seasonalityWeekOfYear={}, seasonalityDayOfYear={}, seasonalityDayOfMonth={}, seasonalityDayOfWeek={}, seasonalityQuarter={}, seasonalityHour={}, seasonalityMinute={}, seasonalitySecond={}, autocorrelation={}, partialAutocorrelation={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Eda Forecasting Analysis of the latest version of the data.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param primaryKeys: Name of the primary keys in the data
   :type primaryKeys: list
   :param forecastingTargetFeature: Feature in the data that represents the target.
   :type forecastingTargetFeature: str
   :param timestampFeature: Feature in the data that represents the timestamp column.
   :type timestampFeature: str
   :param forecastFrequency: Frequency of the data; can be hourly, daily, weekly, monthly, quarterly, or yearly.
   :type forecastFrequency: str
   :param salesAcrossTime: Data showing average, p10, p90, median sales across time
   :type salesAcrossTime: ForecastingAnalysisGraphData
   :param cummulativeContribution: Data showing what percent of items contribute to what amount of sales.
   :type cummulativeContribution: ForecastingAnalysisGraphData
   :param missingValueDistribution: Data showing missing or null value distribution
   :type missingValueDistribution: ForecastingAnalysisGraphData
   :param historyLength: Data showing length of history distribution
   :type historyLength: ForecastingAnalysisGraphData
   :param numRowsHistogram: Data showing number of rows for an item distribution
   :type numRowsHistogram: ForecastingAnalysisGraphData
   :param productMaturity: Data showing length of how long a product has been alive with average, p10, p90 and median
   :type productMaturity: ForecastingAnalysisGraphData
   :param seasonalityYear: Data showing average, p10, p90, median sales across grouped years
   :type seasonalityYear: ForecastingAnalysisGraphData
   :param seasonalityMonth: Data showing average, p10, p90, median sales across grouped months
   :type seasonalityMonth: ForecastingAnalysisGraphData
   :param seasonalityWeekOfYear: Data showing average, p10, p90, median sales across week of year seasonality
   :type seasonalityWeekOfYear: ForecastingAnalysisGraphData
   :param seasonalityDayOfYear: Data showing average, p10, p90, median sales across day of year seasonality
   :type seasonalityDayOfYear: ForecastingAnalysisGraphData
   :param seasonalityDayOfMonth: Data showing average, p10, p90, median sales across day of month seasonality
   :type seasonalityDayOfMonth: ForecastingAnalysisGraphData
   :param seasonalityDayOfWeek: Data showing average, p10, p90, median sales across day of week seasonality
   :type seasonalityDayOfWeek: ForecastingAnalysisGraphData
   :param seasonalityQuarter: Data showing average, p10, p90, median sales across grouped quarters
   :type seasonalityQuarter: ForecastingAnalysisGraphData
   :param seasonalityHour: Data showing average, p10, p90, median sales across grouped hours
   :type seasonalityHour: ForecastingAnalysisGraphData
   :param seasonalityMinute: Data showing average, p10, p90, median sales across grouped minutes
   :type seasonalityMinute: ForecastingAnalysisGraphData
   :param seasonalitySecond: Data showing average, p10, p90, median sales across grouped seconds
   :type seasonalitySecond: ForecastingAnalysisGraphData
   :param autocorrelation: Data showing the correlation of the forecasting target and its lagged values at different time lags.
   :type autocorrelation: ForecastingAnalysisGraphData
   :param partialAutocorrelation: Data showing the correlation of the forecasting target and its lagged values, controlling for the effects of intervening lags.
   :type partialAutocorrelation: ForecastingAnalysisGraphData
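
   A minimal sketch of reading the seasonality charts (assumes ``analysis`` is an :py:class:`EdaForecastingAnalysis` returned by :py:meth:`EdaVersion.get_eda_forecasting_analysis`):

   .. code-block:: python

      # Each seasonality field is ForecastingAnalysisGraphData; to_dict() flattens
      # the whole analysis into nested dicts for plotting or serialization.
      print(analysis.forecast_frequency)
      monthly = analysis.seasonality_month
      as_dict = analysis.to_dict()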


   .. py:attribute:: primary_keys
      :value: None



   .. py:attribute:: forecasting_target_feature
      :value: None



   .. py:attribute:: timestamp_feature
      :value: None



   .. py:attribute:: forecast_frequency
      :value: None



   .. py:attribute:: sales_across_time


   .. py:attribute:: cummulative_contribution


   .. py:attribute:: missing_value_distribution


   .. py:attribute:: history_length


   .. py:attribute:: num_rows_histogram


   .. py:attribute:: product_maturity


   .. py:attribute:: seasonality_year


   .. py:attribute:: seasonality_month


   .. py:attribute:: seasonality_week_of_year


   .. py:attribute:: seasonality_day_of_year


   .. py:attribute:: seasonality_day_of_month


   .. py:attribute:: seasonality_day_of_week


   .. py:attribute:: seasonality_quarter


   .. py:attribute:: seasonality_hour


   .. py:attribute:: seasonality_minute


   .. py:attribute:: seasonality_second


   .. py:attribute:: autocorrelation


   .. py:attribute:: partial_autocorrelation


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: EdaVersion(client, edaVersion=None, status=None, edaId=None, edaStartedAt=None, edaCompletedAt=None, referenceFeatureGroupVersion=None, testFeatureGroupVersion=None, error=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of an eda object

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param edaVersion: The unique identifier of an EDA version.
   :type edaVersion: str
   :param status: The current status of the eda object.
   :type status: str
   :param edaId: A reference to the eda this version belongs to.
   :type edaId: str
   :param edaStartedAt: The start time and date of the eda process.
   :type edaStartedAt: str
   :param edaCompletedAt: The end time and date of the eda process.
   :type edaCompletedAt: str
   :param referenceFeatureGroupVersion: Feature group version IDs that this refresh pipeline run is analyzing.
   :type referenceFeatureGroupVersion: list[str]
   :param testFeatureGroupVersion: Feature group version IDs that this refresh pipeline run is analyzing.
   :type testFeatureGroupVersion: list[str]
   :param error: Relevant error if the status is FAILED.
   :type error: str


   .. py:attribute:: eda_version
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: eda_id
      :value: None



   .. py:attribute:: eda_started_at
      :value: None



   .. py:attribute:: eda_completed_at
      :value: None



   .. py:attribute:: reference_feature_group_version
      :value: None



   .. py:attribute:: test_feature_group_version
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: EdaVersion



   .. py:method:: describe()

      Retrieves a full description of the specified EDA version.

      :param eda_version: Unique string identifier of the EDA version.
      :type eda_version: str

      :returns: An EDA version.
      :rtype: EdaVersion



   .. py:method:: delete()

      Deletes the specified EDA version.

      :param eda_version: Unique string identifier of the EDA version to delete.
      :type eda_version: str



   .. py:method:: get_eda_collinearity()

      Gets the Collinearity between all features for the Exploratory Data Analysis.

      :param eda_version: Unique string identifier associated with the EDA instance.
      :type eda_version: str

      :returns: An object with a record of correlations between each feature for the EDA.
      :rtype: EdaCollinearity



   .. py:method:: get_eda_data_consistency(transformation_feature = None)

      Gets the data consistency for the Exploratory Data Analysis.

      :param transformation_feature: The transformation feature to get consistency for.
      :type transformation_feature: str

      :returns: Object with duplication, deletion, and transformation data for data consistency analysis for an EDA.
      :rtype: EdaDataConsistency



   .. py:method:: get_collinearity_for_feature(feature_name = None)

      Gets the Collinearity for the given feature from the Exploratory Data Analysis.

      :param feature_name: Name of the feature for which correlation is shown.
      :type feature_name: str

      :returns: Object with a record of correlations for the provided feature for an EDA.
      :rtype: EdaFeatureCollinearity



   .. py:method:: get_feature_association(reference_feature_name, test_feature_name)

      Gets the Feature Association for the given features from the feature group version within the eda_version.

      :param reference_feature_name: Name of the feature for feature association (on x-axis for the plots generated for the Feature association in the product).
      :type reference_feature_name: str
      :param test_feature_name: Name of the feature for feature association (on y-axis for the plots generated for the Feature association in the product).
      :type test_feature_name: str

      :returns: An object with a record of data for the feature association between the two given features for an EDA version.
      :rtype: EdaFeatureAssociation
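
      A minimal usage sketch (assumes ``eda_version`` is an :py:class:`EdaVersion` whose analysis has completed; the feature names are illustrative):

      .. code-block:: python

         # Compare a reference feature (x-axis) against a test feature (y-axis).
         association = eda_version.get_feature_association(
             reference_feature_name='price',   # hypothetical column name
             test_feature_name='units_sold',   # hypothetical column name
         )
         print(association.is_scatter, association.is_box_whisker)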



   .. py:method:: get_eda_forecasting_analysis()

      Gets the Forecasting analysis for the Exploratory Data Analysis.

      :param eda_version: Unique string identifier associated with the EDA version.
      :type eda_version: str

      :returns: Object with forecasting analysis that includes sales_across_time, cummulative_contribution, missing_value_distribution, history_length, num_rows_histogram, product_maturity data.
      :rtype: EdaForecastingAnalysis



   .. py:method:: wait_for_eda(timeout=1200)

      Waits until the EDA version is ready.

      :param timeout: Maximum time, in seconds, to wait for the call to finish; if it does not finish within this time, the call times out.
      :type timeout: int
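
      A minimal usage sketch (assumes ``eda_version`` is an :py:class:`EdaVersion` returned by an earlier API call; the status string shown is illustrative):

      .. code-block:: python

         # Block until the analysis run finishes, then pull the collinearity results.
         eda_version.wait_for_eda(timeout=1200)
         if eda_version.get_status() == 'COMPLETE':  # status value is an assumption
             collinearity = eda_version.get_eda_collinearity()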



   .. py:method:: get_status()

      Gets the status of the eda version.

      :returns: A string describing the status of the EDA version (e.g., pending, complete).
      :rtype: str



.. py:class:: EditImageModels(client, models=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Edit image models

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param models: The models available for edit image.
   :type models: list


   .. py:attribute:: models
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: EmbeddingFeatureDriftDistribution(client, distance=None, jsDistance=None, wsDistance=None, ksStatistic=None, psi=None, csi=None, chiSquare=None, averageDrift={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Feature distribution for embeddings

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param distance: Histogram data of KL divergences between the training distribution and the range of values in the specified window.
   :type distance: list
   :param jsDistance: Histogram data of JS divergence between the training distribution and the range of values in the specified window.
   :type jsDistance: list
   :param wsDistance: Histogram data of Wasserstein distance between the training distribution and the range of values in the specified window.
   :type wsDistance: list
   :param ksStatistic: Histogram data of Kolmogorov-Smirnov statistic computed between the training distribution and the range of values in the specified window.
   :type ksStatistic: list
   :param psi: Histogram data of Population stability index computed between the training distribution and the range of values in the specified window.
   :type psi: list
   :param csi: Histogram data of Characteristic Stability Index computed between the training distribution and the range of values in the specified window.
   :type csi: list
   :param chiSquare: Histogram data of Chi-square statistic computed between the training distribution and the range of values in the specified window.
   :type chiSquare: list
   :param averageDrift: Average drift embedding for each type of drift
   :type averageDrift: DriftTypesValue


   .. py:attribute:: distance
      :value: None



   .. py:attribute:: js_distance
      :value: None



   .. py:attribute:: ws_distance
      :value: None



   .. py:attribute:: ks_statistic
      :value: None



   .. py:attribute:: psi
      :value: None



   .. py:attribute:: csi
      :value: None



   .. py:attribute:: chi_square
      :value: None



   .. py:attribute:: average_drift


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ExecuteFeatureGroupOperation(client, featureGroupOperationRunId=None, status=None, error=None, query=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The result of executing a SQL query

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupOperationRunId: The run id of the operation
   :type featureGroupOperationRunId: str
   :param status: The status of the operation
   :type status: str
   :param error: The error message if the operation failed
   :type error: str
   :param query: The SQL query of the operation
   :type query: str


   .. py:attribute:: feature_group_operation_run_id
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: query
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: wait_for_results(timeout=3600, delay=2)

      Waits until the query has finished executing.

      :param timeout: Maximum time, in seconds, to wait for the query to finish; if it does not finish within this time, the call times out.
      :type timeout: int
      :param delay: Polling interval, in seconds, between status checks.
      :type delay: int



   .. py:method:: wait_for_execution(timeout=3600, delay=2)

      Waits until the query has finished executing.

      :param timeout: Maximum time, in seconds, to wait for the query to finish; if it does not finish within this time, the call times out.
      :type timeout: int
      :param delay: Polling interval, in seconds, between status checks.
      :type delay: int



   .. py:method:: get_status()

      Gets the status of the query execution

      :returns: A string describing the status of a query execution (pending, complete, etc.).
      :rtype: str



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: ExecuteFeatureGroupOperation



   .. py:method:: describe()

      Gets the description of the query execution

      :returns: A ExecuteFeatureGroupOperation object describing the query execution.
      :rtype: ExecuteFeatureGroupOperation



   .. py:method:: _download_avro_file(file_part, tmp_dir, part_index)


   .. py:method:: load_as_pandas(max_workers=10)

      Loads the result data into a pandas dataframe

      :param max_workers: The number of threads.
      :type max_workers: int

      :returns: A pandas DataFrame containing the results of the query execution.
      :rtype: DataFrame
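
      A minimal end-to-end sketch (assumes ``operation`` is an :py:class:`ExecuteFeatureGroupOperation` returned by an API call that executed a SQL query; the status string is illustrative):

      .. code-block:: python

         # Wait for the query to finish, then pull the results into pandas.
         operation.wait_for_execution(timeout=3600, delay=2)
         if operation.get_status() == 'COMPLETE':  # status value is an assumption
             df = operation.load_as_pandas(max_workers=10)
             print(df.head())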



.. py:class:: ExternalApplication(client, name=None, externalApplicationId=None, deploymentId=None, description=None, logo=None, theme=None, userGroupIds=None, useCase=None, isAgent=None, status=None, deploymentConversationRetentionHours=None, managedUserService=None, predictionOverrides=None, isSystemCreated=None, isCustomizable=None, isDeprecated=None, isVisible=None, hasThinkingOption=None, onlyImageGenEnabled=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An external application.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the external application.
   :type name: str
   :param externalApplicationId: The unique identifier of the external application.
   :type externalApplicationId: str
   :param deploymentId: The deployment id associated with the external application.
   :type deploymentId: str
   :param description: The description of the external application.
   :type description: str
   :param logo: The logo.
   :type logo: str
   :param theme: The theme used for the External Application.
   :type theme: dict
   :param userGroupIds: A list of App User Groups with access to this external application
   :type userGroupIds: list
   :param useCase: Use case of the project associated with this deployment.
   :type useCase: str
   :param isAgent: Whether the external application is an agent.
   :type isAgent: bool
   :param status: The status of the deployment.
   :type status: str
   :param deploymentConversationRetentionHours: The retention policy for the external application.
   :type deploymentConversationRetentionHours: int
   :param managedUserService: The external service that is managing the user accounts.
   :type managedUserService: str
   :param predictionOverrides: The prediction overrides for the external application.
   :type predictionOverrides: dict
   :param isSystemCreated: Whether the external application is system created.
   :type isSystemCreated: bool
   :param isCustomizable: Whether the external application is customizable.
   :type isCustomizable: bool
   :param isDeprecated: Whether the external application is deprecated. Only applicable for system created bots. Deprecated external applications will not show in the UI.
   :type isDeprecated: bool
   :param isVisible: Whether the external application should be shown in the dropdown.
   :type isVisible: bool
   :param hasThinkingOption: Whether to show the thinking option in the toolbar.
   :type hasThinkingOption: bool
   :param onlyImageGenEnabled: Whether the LLM only allows image generation.
   :type onlyImageGenEnabled: bool


   .. py:attribute:: name
      :value: None



   .. py:attribute:: external_application_id
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: logo
      :value: None



   .. py:attribute:: theme
      :value: None



   .. py:attribute:: user_group_ids
      :value: None



   .. py:attribute:: use_case
      :value: None



   .. py:attribute:: is_agent
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: deployment_conversation_retention_hours
      :value: None



   .. py:attribute:: managed_user_service
      :value: None



   .. py:attribute:: prediction_overrides
      :value: None



   .. py:attribute:: is_system_created
      :value: None



   .. py:attribute:: is_customizable
      :value: None



   .. py:attribute:: is_deprecated
      :value: None



   .. py:attribute:: is_visible
      :value: None



   .. py:attribute:: has_thinking_option
      :value: None



   .. py:attribute:: only_image_gen_enabled
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: update(name = None, description = None, theme = None, deployment_id = None, deployment_conversation_retention_hours = None, reset_retention_policy = False)

      Updates an External Application.

      :param name: The name of the External Application.
      :type name: str
      :param description: The description of the External Application. This will be shown to users when they access the External Application.
      :type description: str
      :param theme: The visual theme of the External Application.
      :type theme: dict
      :param deployment_id: The ID of the deployment to use.
      :type deployment_id: str
      :param deployment_conversation_retention_hours: The number of hours to retain the conversations for.
      :type deployment_conversation_retention_hours: int
      :param reset_retention_policy: If true, the retention policy will be removed.
      :type reset_retention_policy: bool

      :returns: The updated External Application.
      :rtype: ExternalApplication
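
      A minimal usage sketch (assumes ``app`` is an :py:class:`ExternalApplication` obtained from an earlier API call; the field values are illustrative):

      .. code-block:: python

         # Rename the application and point it at a different deployment.
         app = app.update(
             name='Support Assistant',            # hypothetical name
             deployment_id='new_deployment_id',   # hypothetical deployment ID
         )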



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: ExternalApplication



   .. py:method:: describe()

      Describes an External Application.

      :param external_application_id: The ID of the External Application.
      :type external_application_id: str

      :returns: The External Application.
      :rtype: ExternalApplication



   .. py:method:: delete()

      Deletes an External Application.

      :param external_application_id: The ID of the External Application.
      :type external_application_id: str



.. py:class:: ExternalInvite(client, userAlreadyInOrg=None, userAlreadyInAppGroup=None, userExistsAsInternal=None, successfulInvites=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The response to invites sent to different email addresses

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param userAlreadyInOrg: List of user emails not successfully invited, because they are already in the organization.
   :type userAlreadyInOrg: list
   :param userAlreadyInAppGroup: List of user emails not successfully invited, because they are already in the application group.
   :type userAlreadyInAppGroup: list
   :param userExistsAsInternal: List of user emails not successfully invited, because they are already internal users.
   :type userExistsAsInternal: list
   :param successfulInvites: List of users successfully invited.
   :type successfulInvites: list


   .. py:attribute:: user_already_in_org
      :value: None



   .. py:attribute:: user_already_in_app_group
      :value: None



   .. py:attribute:: user_exists_as_internal
      :value: None



   .. py:attribute:: successful_invites
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ExtractedFields(client, data=None, rawLlmResponse=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The fields extracted from a document.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param data: The fields/data extracted from the document.
   :type data: dict
   :param rawLlmResponse: The raw LLM response. Only returned if it could not be parsed into a JSON dict.
   :type rawLlmResponse: str
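
   A minimal sketch of handling the two possible shapes of the result (assumes ``fields`` is an :py:class:`ExtractedFields` returned by a document extraction call):

   .. code-block:: python

      # data is populated when the LLM output parsed cleanly into a dict;
      # otherwise the unparsed text is available in raw_llm_response.
      if fields.data is not None:
          for key, value in fields.data.items():
              print(key, value)
      else:
          print(fields.raw_llm_response)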


   .. py:attribute:: data
      :value: None



   .. py:attribute:: raw_llm_response
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Feature(client, name=None, selectClause=None, featureMapping=None, sourceTable=None, originalName=None, usingClause=None, orderClause=None, whereClause=None, featureType=None, dataType=None, detectedFeatureType=None, detectedDataType=None, columns={}, pointInTimeInfo={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A feature in a feature group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The unique name of the column
   :type name: str
   :param selectClause: The sql logic for creating this feature's data
   :type selectClause: str
   :param featureMapping: The Feature Mapping of the feature
   :type featureMapping: str
   :param sourceTable: The source table of the column
   :type sourceTable: str
   :param originalName: The original name of the column
   :type originalName: str
   :param usingClause: Nested Column Using Clause
   :type usingClause: str
   :param orderClause: Nested Column Ordering Clause
   :type orderClause: str
   :param whereClause: Nested Column Where Clause
   :type whereClause: str
   :param featureType: Feature Type of the Feature
   :type featureType: str
   :param dataType: Data Type of the Feature
   :type dataType: str
   :param detectedFeatureType: The detected feature type of the column
   :type detectedFeatureType: str
   :param detectedDataType: The detected data type of the column
   :type detectedDataType: str
   :param columns: Nested Features
   :type columns: NestedFeature
   :param pointInTimeInfo: Point in time column information
   :type pointInTimeInfo: PointInTimeFeature


   .. py:attribute:: name
      :value: None



   .. py:attribute:: select_clause
      :value: None



   .. py:attribute:: feature_mapping
      :value: None



   .. py:attribute:: source_table
      :value: None



   .. py:attribute:: original_name
      :value: None



   .. py:attribute:: using_clause
      :value: None



   .. py:attribute:: order_clause
      :value: None



   .. py:attribute:: where_clause
      :value: None



   .. py:attribute:: feature_type
      :value: None



   .. py:attribute:: data_type
      :value: None



   .. py:attribute:: detected_feature_type
      :value: None



   .. py:attribute:: detected_data_type
      :value: None



   .. py:attribute:: columns


   .. py:attribute:: point_in_time_info


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureDistribution(client, type=None, trainingDistribution=None, predictionDistribution=None, numericalTrainingDistribution=None, numericalPredictionDistribution=None, trainingStatistics=None, predictionStatistics=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   For a single feature, how it has changed in the training data versus some specified window

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param type: Data type of values in each distribution, typically 'categorical' or 'numerical'.
   :type type: str
   :param trainingDistribution: A dict describing the range of values in the training distribution.
   :type trainingDistribution: dict
   :param predictionDistribution: A dict describing the range of values in the specified window.
   :type predictionDistribution: dict
   :param numericalTrainingDistribution: A dict describing the summary statistics of the numerical training distribution.
   :type numericalTrainingDistribution: dict
   :param numericalPredictionDistribution: A dict describing the summary statistics of the numerical prediction distribution.
   :type numericalPredictionDistribution: dict
   :param trainingStatistics: A dict describing summary statistics of values in the training distribution.
   :type trainingStatistics: dict
   :param predictionStatistics: A dict describing summary statistics of values in the specified window.
   :type predictionStatistics: dict


   .. py:attribute:: type
      :value: None



   .. py:attribute:: training_distribution
      :value: None



   .. py:attribute:: prediction_distribution
      :value: None



   .. py:attribute:: numerical_training_distribution
      :value: None



   .. py:attribute:: numerical_prediction_distribution
      :value: None



   .. py:attribute:: training_statistics
      :value: None



   .. py:attribute:: prediction_statistics
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureDriftRecord(client, name=None, distance=None, jsDistance=None, wsDistance=None, ksStatistic=None, psi=None, csi=None, chiSquare=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Value of each type of drift

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: Name of feature.
   :type name: str
   :param distance: Symmetric sum of KL divergences between the training distribution and the range of values in the specified window.
   :type distance: float
   :param jsDistance: JS divergence between the training distribution and the range of values in the specified window.
   :type jsDistance: float
   :param wsDistance: Wasserstein distance between the training distribution and the range of values in the specified window.
   :type wsDistance: float
   :param ksStatistic: Kolmogorov-Smirnov statistic computed between the training distribution and the range of values in the specified window.
   :type ksStatistic: float
   :param psi: Population stability index computed between the training distribution and the range of values in the specified window.
   :type psi: float
   :param csi: Characteristic Stability Index computed between the training distribution and the range of values in the specified window.
   :type csi: float
   :param chiSquare: Chi-square statistic computed between the training distribution and the range of values in the specified window.
   :type chiSquare: float


   .. py:attribute:: name
      :value: None



   .. py:attribute:: distance
      :value: None



   .. py:attribute:: js_distance
      :value: None



   .. py:attribute:: ws_distance
      :value: None



   .. py:attribute:: ks_statistic
      :value: None



   .. py:attribute:: psi
      :value: None



   .. py:attribute:: csi
      :value: None



   .. py:attribute:: chi_square
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureDriftSummary(client, featureIndex=None, name=None, distance=None, jsDistance=None, wsDistance=None, ksStatistic=None, predictionDrift=None, targetColumn=None, dataIntegrityTimeseries=None, nestedSummary=None, psi=None, csi=None, chiSquare=None, nullViolations={}, rangeViolations={}, catViolations={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Summary of important model monitoring statistics for features available in a model monitoring instance

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureIndex: A list of dicts of eligible feature names and corresponding overall feature drift measures.
   :type featureIndex: list[dict]
   :param name: Name of feature.
   :type name: str
   :param distance: Symmetric sum of KL divergences between the training distribution and the range of values in the specified window.
   :type distance: float
   :param jsDistance: JS divergence between the training distribution and the range of values in the specified window.
   :type jsDistance: float
   :param wsDistance: Wasserstein distance between the training distribution and the range of values in the specified window.
   :type wsDistance: float
   :param ksStatistic: Kolmogorov-Smirnov statistic computed between the training distribution and the range of values in the specified window.
   :type ksStatistic: float
   :param predictionDrift: Drift for the target column.
   :type predictionDrift: float
   :param targetColumn: Target column name.
   :type targetColumn: str
   :param dataIntegrityTimeseries: Frequency vs Data Integrity Violation Charts.
   :type dataIntegrityTimeseries: dict
   :param nestedSummary: Summary of model monitoring statistics for nested features.
   :type nestedSummary: list[dict]
   :param psi: Population stability index computed between the training distribution and the range of values in the specified window.
   :type psi: float
   :param csi: Characteristic Stability Index computed between the training distribution and the range of values in the specified window.
   :type csi: float
   :param chiSquare: Chi-square statistic computed between the training distribution and the range of values in the specified window.
   :type chiSquare: float
   :param nullViolations: A list of dicts of feature names and a description of corresponding null violations.
   :type nullViolations: NullViolation
   :param rangeViolations: A list of dicts of numerical feature names and corresponding prediction range discrepancies.
   :type rangeViolations: RangeViolation
   :param catViolations: A list of dicts of categorical feature names and corresponding prediction range discrepancies.
   :type catViolations: CategoricalRangeViolation
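
   A minimal sketch of scanning a drift summary (assumes ``summary`` is a :py:class:`FeatureDriftSummary` returned by a model monitoring call):

   .. code-block:: python

      # feature_index lists per-feature drift measures; the null, range, and
      # categorical violations describe data integrity issues in the window.
      for entry in summary.feature_index:
          print(entry)
      print(summary.target_column, summary.prediction_drift)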


   .. py:attribute:: feature_index
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: distance
      :value: None



   .. py:attribute:: js_distance
      :value: None



   .. py:attribute:: ws_distance
      :value: None



   .. py:attribute:: ks_statistic
      :value: None



   .. py:attribute:: prediction_drift
      :value: None



   .. py:attribute:: target_column
      :value: None



   .. py:attribute:: data_integrity_timeseries
      :value: None



   .. py:attribute:: nested_summary
      :value: None



   .. py:attribute:: psi
      :value: None



   .. py:attribute:: csi
      :value: None



   .. py:attribute:: chi_square
      :value: None



   .. py:attribute:: null_violations


   .. py:attribute:: range_violations


   .. py:attribute:: cat_violations


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroup(client, featureGroupId=None, modificationLock=None, name=None, featureGroupSourceType=None, tableName=None, sql=None, datasetId=None, functionSourceCode=None, functionName=None, sourceTables=None, createdAt=None, description=None, sqlError=None, latestVersionOutdated=None, referencedFeatureGroups=None, tags=None, primaryKey=None, updateTimestampKey=None, lookupKeys=None, streamingEnabled=None, incremental=None, mergeConfig=None, samplingConfig=None, cpuSize=None, memory=None, streamingReady=None, featureTags=None, moduleName=None, templateBindings=None, featureExpression=None, useOriginalCsvNames=None, pythonFunctionBindings=None, pythonFunctionName=None, useGpu=None, versionLimit=None, exportOnMaterialization=None, features={}, duplicateFeatures={}, pointInTimeGroups={}, annotationConfig={}, concatenationConfig={}, indexingConfig={}, codeSource={}, featureGroupTemplate={}, explanation={}, refreshSchedules={}, exportConnectorConfig={}, latestFeatureGroupVersion={}, operatorConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A feature group.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: Unique identifier for this feature group.
   :type featureGroupId: str
   :param modificationLock: Whether the feature group is locked against modification.
   :type modificationLock: bool
   :param name: The name of the feature group.
   :type name: str
   :param featureGroupSourceType: The source type of the feature group
   :type featureGroupSourceType: str
   :param tableName: Unique table name of this feature group.
   :type tableName: str
   :param sql: SQL definition creating this feature group.
   :type sql: str
   :param datasetId: Dataset ID the feature group is sourced from.
   :type datasetId: str
   :param functionSourceCode: Source definition creating this feature group.
   :type functionSourceCode: str
   :param functionName: Function name to execute from the source code.
   :type functionName: str
   :param sourceTables: Source tables for this feature group.
   :type sourceTables: list[str]
   :param createdAt: Timestamp at which the feature group was created.
   :type createdAt: str
   :param description: Description of the feature group.
   :type description: str
   :param sqlError: Error message with this feature group.
   :type sqlError: str
   :param latestVersionOutdated: Whether the latest materialized feature group version is outdated.
   :type latestVersionOutdated: bool
   :param referencedFeatureGroups: Feature groups this feature group is used in.
   :type referencedFeatureGroups: list[str]
   :param tags: Tags added to this feature group.
   :type tags: list[str]
   :param primaryKey: Primary index feature.
   :type primaryKey: str
   :param updateTimestampKey: Primary timestamp feature.
   :type updateTimestampKey: str
   :param lookupKeys: Additional indexed features for this feature group.
   :type lookupKeys: list[str]
   :param streamingEnabled: If true, the feature group can have data streamed to it.
   :type streamingEnabled: bool
   :param incremental: If feature group corresponds to an incremental dataset.
   :type incremental: bool
   :param mergeConfig: Merge configuration settings for the feature group.
   :type mergeConfig: dict
   :param samplingConfig: Sampling configuration for the feature group.
   :type samplingConfig: dict
   :param cpuSize: CPU size specified for the Python feature group.
   :type cpuSize: str
   :param memory: Memory in GB specified for the Python feature group.
   :type memory: int
   :param streamingReady: If true, the feature group is ready to receive streaming data.
   :type streamingReady: bool
   :param featureTags: Tags for features in this feature group
   :type featureTags: dict
   :param moduleName: Path to the file with the feature group function.
   :type moduleName: str
   :param templateBindings: Config specifying variable names and values to use when resolving a feature group template.
   :type templateBindings: dict
   :param featureExpression: If the dataset feature group has custom features, the SQL select expression creating those features.
   :type featureExpression: str
   :param useOriginalCsvNames: If true, the feature group will use the original column names in the source dataset.
   :type useOriginalCsvNames: bool
   :param pythonFunctionBindings: Config specifying variable names, types, and values to use when resolving a Python feature group.
   :type pythonFunctionBindings: dict
   :param pythonFunctionName: Name of the Python function the feature group was built from.
   :type pythonFunctionName: str
   :param useGpu: Whether this feature group uses a GPU
   :type useGpu: bool
   :param versionLimit: Version limit for the feature group.
   :type versionLimit: int
   :param exportOnMaterialization: Whether to export the feature group on materialization.
   :type exportOnMaterialization: bool
   :param features: List of resolved features.
   :type features: Feature
   :param duplicateFeatures: List of duplicate features.
   :type duplicateFeatures: Feature
   :param pointInTimeGroups: List of Point In Time Groups.
   :type pointInTimeGroups: PointInTimeGroup
   :param annotationConfig: Annotation config for this feature
   :type annotationConfig: AnnotationConfig
   :param latestFeatureGroupVersion: Latest feature group version.
   :type latestFeatureGroupVersion: FeatureGroupVersion
   :param concatenationConfig: Feature group ID whose data will be concatenated into this feature group.
   :type concatenationConfig: ConcatenationConfig
   :param indexingConfig: Indexing config for the feature group for feature store
   :type indexingConfig: IndexingConfig
   :param codeSource: If a Python feature group, information on the source code.
   :type codeSource: CodeSource
   :param featureGroupTemplate: FeatureGroupTemplate to use when this feature group is attached to a template.
   :type featureGroupTemplate: FeatureGroupTemplate
   :param explanation: Natural language explanation of the feature group
   :type explanation: NaturalLanguageExplanation
   :param refreshSchedules: List of schedules that determines when the next version of the feature group will be created.
   :type refreshSchedules: RefreshSchedule
   :param exportConnectorConfig: The export config (file connector or database connector information) for feature group exports.
   :type exportConnectorConfig: FeatureGroupRefreshExportConfig
   :param operatorConfig: Operator configuration settings for the feature group.
   :type operatorConfig: OperatorConfig


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: modification_lock
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: feature_group_source_type
      :value: None



   .. py:attribute:: table_name
      :value: None



   .. py:attribute:: sql
      :value: None



   .. py:attribute:: dataset_id
      :value: None



   .. py:attribute:: function_source_code
      :value: None



   .. py:attribute:: function_name
      :value: None



   .. py:attribute:: source_tables
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: sql_error
      :value: None



   .. py:attribute:: latest_version_outdated
      :value: None



   .. py:attribute:: referenced_feature_groups
      :value: None



   .. py:attribute:: tags
      :value: None



   .. py:attribute:: primary_key
      :value: None



   .. py:attribute:: update_timestamp_key
      :value: None



   .. py:attribute:: lookup_keys
      :value: None



   .. py:attribute:: streaming_enabled
      :value: None



   .. py:attribute:: incremental
      :value: None



   .. py:attribute:: merge_config
      :value: None



   .. py:attribute:: sampling_config
      :value: None



   .. py:attribute:: cpu_size
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: streaming_ready
      :value: None



   .. py:attribute:: feature_tags
      :value: None



   .. py:attribute:: module_name
      :value: None



   .. py:attribute:: template_bindings
      :value: None



   .. py:attribute:: feature_expression
      :value: None



   .. py:attribute:: use_original_csv_names
      :value: None



   .. py:attribute:: python_function_bindings
      :value: None



   .. py:attribute:: python_function_name
      :value: None



   .. py:attribute:: use_gpu
      :value: None



   .. py:attribute:: version_limit
      :value: None



   .. py:attribute:: export_on_materialization
      :value: None



   .. py:attribute:: features


   .. py:attribute:: duplicate_features


   .. py:attribute:: point_in_time_groups


   .. py:attribute:: annotation_config


   .. py:attribute:: concatenation_config


   .. py:attribute:: indexing_config


   .. py:attribute:: code_source


   .. py:attribute:: feature_group_template


   .. py:attribute:: explanation


   .. py:attribute:: refresh_schedules


   .. py:attribute:: export_connector_config


   .. py:attribute:: latest_feature_group_version


   .. py:attribute:: operator_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: add_to_project(project_id, feature_group_type = 'CUSTOM_TABLE')

      Adds a feature group to a project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str
      :param feature_group_type: The feature group type of the feature group, based on the use case under which the feature group is being created.
      :type feature_group_type: str
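
      A minimal usage sketch for managing project membership (assumes ``fg`` is an existing ``FeatureGroup`` object and ``'project_id'`` is a placeholder):

      .. code-block:: python

         # Attach the feature group to a project, adjust its type, then detach it.
         fg.add_to_project('project_id', feature_group_type='CUSTOM_TABLE')
         fg.set_type('project_id', feature_group_type='CUSTOM_TABLE')
         fg.remove_from_project('project_id')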



   .. py:method:: set_project_config(project_id, project_config = None)

      Sets a feature group's project config

      :param project_id: Unique string identifier for the project.
      :type project_id: str
      :param project_config: Feature group's project configuration.
      :type project_config: ProjectFeatureGroupConfig



   .. py:method:: get_project_config(project_id)

      Gets a feature group's project config

      :param project_id: Unique string identifier for the project.
      :type project_id: str

      :returns: The feature group's project configuration.
      :rtype: ProjectConfig



   .. py:method:: remove_from_project(project_id)

      Removes a feature group from a project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str



   .. py:method:: set_type(project_id, feature_group_type = 'CUSTOM_TABLE')

      Update the feature group type in a project. The feature group must already be added to the project.

      :param project_id: Unique identifier associated with the project.
      :type project_id: str
      :param feature_group_type: The feature group type to set the feature group as.
      :type feature_group_type: str



   .. py:method:: describe_annotation(feature_name = None, doc_id = None, feature_group_row_identifier = None)

      Get the latest annotation entry for a given feature group, feature, and document.

      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param doc_id: The ID of the primary document the annotation is on. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type doc_id: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the feature group's primary / identifier key value. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type feature_group_row_identifier: str

      :returns: The latest annotation entry for the given feature group, feature, document, and/or annotation key value.
      :rtype: AnnotationEntry



   .. py:method:: verify_and_describe_annotation(feature_name = None, doc_id = None, feature_group_row_identifier = None)

      Get the latest annotation entry for a given feature group, feature, and document along with verification information.

      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param doc_id: The ID of the primary document the annotation is on. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type doc_id: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the feature group's primary / identifier key value. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type feature_group_row_identifier: str

      :returns: The latest annotation entry for the given feature group, feature, document, and/or annotation key value. Includes the verification information.
      :rtype: AnnotationEntry



   .. py:method:: update_annotation_status(feature_name, status, doc_id = None, feature_group_row_identifier = None, save_metadata = False)

      Update the status of an annotation entry.

      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param status: The new status of the annotation. Must be one of the following: 'TODO', 'IN_PROGRESS', 'DONE'.
      :type status: str
      :param doc_id: The ID of the primary document the annotation is on. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type doc_id: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the feature group's primary / identifier key value. At least one of the doc_id or feature_group_row_identifier must be provided in order to identify the correct annotation.
      :type feature_group_row_identifier: str
      :param save_metadata: If True, save the metadata for the annotation entry.
      :type save_metadata: bool

      :returns: The updated annotation entry.
      :rtype: AnnotationEntry
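
      An illustrative sketch of an annotation-status update (the feature name and document ID are placeholders; the status values come from the parameter description above):

      .. code-block:: python

         # Fetch the latest annotation for a document, then mark it as done.
         entry = fg.describe_annotation(feature_name='label', doc_id='doc_123')
         updated = fg.update_annotation_status(
             feature_name='label',
             status='DONE',  # one of 'TODO', 'IN_PROGRESS', 'DONE'
             doc_id='doc_123',
         )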



   .. py:method:: get_document_to_annotate(project_id, feature_name, feature_group_row_identifier = None, get_previous = False)

      Get an available document that needs to be annotated for an annotation feature group.

      :param project_id: The ID of the project that the annotation is associated with.
      :type project_id: str
      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param feature_group_row_identifier: The key value of the feature group row the annotation is on (cast to string). Usually the primary key value. If provided, fetch the immediate next (or previous) available document.
      :type feature_group_row_identifier: str
      :param get_previous: If True, get the previous document instead of the next document. Applicable if feature_group_row_identifier is provided.
      :type get_previous: bool

      :returns: The document to annotate.
      :rtype: AnnotationDocument



   .. py:method:: get_annotations_status(feature_name = None, check_for_materialization = False)

      Get the status of the annotations for a given feature group and feature.

      :param feature_name: The name of the feature the annotation is on.
      :type feature_name: str
      :param check_for_materialization: If True, check if the feature group needs to be materialized before using for annotations.
      :type check_for_materialization: bool

      :returns: The status of the annotations for the given feature group and feature.
      :rtype: AnnotationsStatus



   .. py:method:: import_annotation_labels(file, annotation_type)

      Imports annotation labels from a CSV file. All valid values in the file will be imported as labels (including the header row, if present).

      :param file: The file to import. Must be a csv file.
      :type file: io.TextIOBase
      :param annotation_type: The type of the annotation.
      :type annotation_type: str

      :returns: The annotation config for the feature group.
      :rtype: AnnotationConfig



   .. py:method:: create_sampling(table_name, sampling_config, description = None)

      Creates a new Feature Group defined as a sample of rows from another Feature Group.

      For efficiency, sampling is approximate unless otherwise specified (e.g., the number of rows may vary slightly from what was requested).


      :param table_name: The unique name to be given to this sampling Feature Group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param sampling_config: Dictionary defining the sampling method and its parameters.
      :type sampling_config: SamplingConfig
      :param description: A human-readable description of this Feature Group.
      :type description: str

      :returns: The created Feature Group.
      :rtype: FeatureGroup
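
      A hedged sketch (the ``sampling_config`` keys below are illustrative only, not a confirmed schema; the table name and description are placeholders):

      .. code-block:: python

         # Create a sampled copy of this feature group.
         sampled_fg = fg.create_sampling(
             table_name='my_table_sample',
             sampling_config={'sampling_method': 'N_SAMPLING', 'sample_count': 10000},
             description='Approximate 10k-row sample of my_table',
         )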



   .. py:method:: set_sampling_config(sampling_config)

      Set a FeatureGroup’s sampling to the config values provided, so that the rows the FeatureGroup returns will be a sample of those it would otherwise have returned.

      :param sampling_config: A JSON string object specifying the sampling method and parameters specific to that sampling method. An empty sampling_config indicates no sampling.
      :type sampling_config: SamplingConfig

      :returns: The updated FeatureGroup.
      :rtype: FeatureGroup



   .. py:method:: set_merge_config(merge_config)

      Set a MergeFeatureGroup’s merge config to the values provided, so that the feature group only returns a bounded range of an incremental dataset.

      :param merge_config: JSON object string specifying the merge rule. An empty merge_config will default to only including the latest dataset version.
      :type merge_config: MergeConfig

      :returns: The updated FeatureGroup.
      :rtype: FeatureGroup



   .. py:method:: set_operator_config(operator_config)

      Set an OperatorFeatureGroup's operator config to the values provided.

      :param operator_config: A dictionary object specifying the pre-defined operations.
      :type operator_config: OperatorConfig

      :returns: The updated FeatureGroup.
      :rtype: FeatureGroup



   .. py:method:: set_schema(schema)

      Creates a new schema and points the feature group to the new feature group schema ID.

      :param schema: JSON string containing an array of objects with 'name' and 'dataType' properties.
      :type schema: list
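
      A minimal sketch following the documented ``'name'``/``'dataType'`` format (the column names and data type values are illustrative):

      .. code-block:: python

         fg.set_schema([
             {'name': 'user_id', 'dataType': 'STRING'},
             {'name': 'amount', 'dataType': 'FLOAT'},
         ])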



   .. py:method:: get_schema(project_id = None)

      Returns a schema for a given FeatureGroup in a project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str

      :returns: A list of objects for each column in the specified feature group.
      :rtype: list[Feature]



   .. py:method:: create_feature(name, select_expression)

      Creates a new feature in a Feature Group from a SQL select statement.

      :param name: The name of the feature to add.
      :type name: str
      :param select_expression: SQL SELECT expression to create the feature.
      :type select_expression: str

      :returns: A Feature Group object with the newly added feature.
      :rtype: FeatureGroup
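
      A minimal sketch (the column names and the SQL expression are placeholders):

      .. code-block:: python

         # Add a derived feature from a SQL SELECT expression.
         fg = fg.create_feature(
             name='full_name',
             select_expression="CONCAT(first_name, ' ', last_name)",
         )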



   .. py:method:: add_tag(tag)

      Adds a tag to the feature group

      :param tag: The tag to add to the feature group.
      :type tag: str



   .. py:method:: remove_tag(tag)

      Removes a tag from the specified feature group.

      :param tag: The tag to remove from the feature group.
      :type tag: str



   .. py:method:: add_annotatable_feature(name, annotation_type)

      Adds an annotatable feature to a Feature Group.

      :param name: The name of the feature to add.
      :type name: str
      :param annotation_type: The type of annotation to set.
      :type annotation_type: str

      :returns: The feature group after the feature has been set
      :rtype: FeatureGroup



   .. py:method:: set_feature_as_annotatable_feature(feature_name, annotation_type, feature_group_row_identifier_feature = None, doc_id_feature = None)

      Sets an existing feature as an annotatable feature (Feature that can be annotated).

      :param feature_name: The name of the feature to set as annotatable.
      :type feature_name: str
      :param annotation_type: The type of annotation label to add.
      :type annotation_type: str
      :param feature_group_row_identifier_feature: The key value of the feature group row the annotation is on (cast to string) and uniquely identifies the feature group row. At least one of the doc_id or key value must be provided so that the correct annotation can be identified.
      :type feature_group_row_identifier_feature: str
      :param doc_id_feature: The name of the document ID feature.
      :type doc_id_feature: str

      :returns: A feature group object with the newly added annotatable feature.
      :rtype: FeatureGroup



   .. py:method:: set_annotation_status_feature(feature_name)

      Sets a feature as the annotation status feature for a feature group.

      :param feature_name: The name of the feature to set as the annotation status feature.
      :type feature_name: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: unset_feature_as_annotatable_feature(feature_name)

      Unsets a feature as annotatable

      :param feature_name: The name of the feature to unset.
      :type feature_name: str

      :returns: The feature group after unsetting the feature
      :rtype: FeatureGroup



   .. py:method:: add_annotation_label(label_name, annotation_type, label_definition = None)

      Adds an annotation label

      :param label_name: The name of the label.
      :type label_name: str
      :param annotation_type: The type of the annotation to set.
      :type annotation_type: str
      :param label_definition: the definition of the label.
      :type label_definition: str

      :returns: The feature group after adding the annotation label
      :rtype: FeatureGroup



   .. py:method:: remove_annotation_label(label_name)

      Removes an annotation label

      :param label_name: The name of the label to remove.
      :type label_name: str

      :returns: The feature group after removing the annotation label
      :rtype: FeatureGroup



   .. py:method:: add_feature_tag(feature, tag)

      Adds a tag on a feature

      :param feature: The feature to set the tag on.
      :type feature: str
      :param tag: The tag to set on the feature.
      :type tag: str



   .. py:method:: remove_feature_tag(feature, tag)

      Removes a tag from a feature

      :param feature: The feature to remove the tag from.
      :type feature: str
      :param tag: The tag to remove.
      :type tag: str



   .. py:method:: create_nested_feature(nested_feature_name, table_name, using_clause, where_clause = None, order_clause = None)

      Creates a new nested feature in a feature group from a SQL statement.

      :param nested_feature_name: The name of the feature.
      :type nested_feature_name: str
      :param table_name: The table name of the feature group to nest. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param using_clause: The SQL join column or logic to join the nested table with the parent.
      :type using_clause: str
      :param where_clause: A SQL WHERE statement to filter the nested rows.
      :type where_clause: str
      :param order_clause: A SQL clause to order the nested rows.
      :type order_clause: str

      :returns: A feature group object with the newly added nested feature.
      :rtype: FeatureGroup
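
      A hedged sketch (the table and column names are placeholders):

      .. code-block:: python

         # Nest related order rows under each parent row.
         fg = fg.create_nested_feature(
             nested_feature_name='recent_orders',
             table_name='orders',
             using_clause='user_id',
             where_clause="status = 'COMPLETE'",
             order_clause='order_timestamp DESC',
         )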



   .. py:method:: update_nested_feature(nested_feature_name, table_name = None, using_clause = None, where_clause = None, order_clause = None, new_nested_feature_name = None)

      Updates a previously existing nested feature in a feature group.

      :param nested_feature_name: The name of the feature to be updated.
      :type nested_feature_name: str
      :param table_name: The name of the table. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str
      :param using_clause: The SQL join column or logic to join the nested table with the parent.
      :type using_clause: str
      :param where_clause: An SQL WHERE statement to filter the nested rows.
      :type where_clause: str
      :param order_clause: An SQL clause to order the nested rows.
      :type order_clause: str
      :param new_nested_feature_name: New name for the nested feature.
      :type new_nested_feature_name: str

      :returns: A feature group object with the updated nested feature.
      :rtype: FeatureGroup



   .. py:method:: delete_nested_feature(nested_feature_name)

      Delete a nested feature.

      :param nested_feature_name: The name of the feature to be deleted.
      :type nested_feature_name: str

      :returns: A feature group object without the specified nested feature.
      :rtype: FeatureGroup



   .. py:method:: create_point_in_time_feature(feature_name, history_table_name, aggregation_keys, timestamp_key, historical_timestamp_key, expression, lookback_window_seconds = None, lookback_window_lag_seconds = 0, lookback_count = None, lookback_until_position = 0)

      Creates a new point in time feature in a feature group using another historical feature group, window spec, and aggregate expression.

      We use the aggregation keys and either the lookbackWindowSeconds or the lookbackCount values to perform the window aggregation for every row in the current feature group.

      If the window is specified in seconds, then all rows in the history table which match the aggregation keys and whose historicalTimeFeature is greater than or equal to the start of the lookback window and less than the current row's timeFeature are considered. An optional lookbackWindowLagSeconds (positive or negative) can be used to offset the current value of the timeFeature; if this value is negative, we will look at "future" rows in the history table, so care must be taken to ensure that these rows are available in the online context when performing a lookup on this feature group.

      If the window is specified in counts, then we order the historical table rows, aligning by time, and consider rows from the window whose rank order is greater than or equal to lookbackCount, including the row just prior to the current one. The lag is specified in terms of positions using lookbackUntilPosition.


      :param feature_name: The name of the feature to create.
      :type feature_name: str
      :param history_table_name: The table name of the history table.
      :type history_table_name: str
      :param aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation.
      :type aggregation_keys: list
      :param timestamp_key: Name of feature which contains the timestamp value for the point in time feature.
      :type timestamp_key: str
      :param historical_timestamp_key: Name of feature which contains the historical timestamp.
      :type historical_timestamp_key: str
      :param expression: SQL aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str
      :param lookback_window_seconds: If window is specified in terms of time, number of seconds in the past from the current time for start of the window.
      :type lookback_window_seconds: float
      :param lookback_window_lag_seconds: Optional lag to offset the closest point for the window. If it is positive, we delay the start of window. If it is negative, we are looking at the "future" rows in the history table.
      :type lookback_window_lag_seconds: float
      :param lookback_count: If window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, we delay the start of window by that many rows. If it is negative, we are looking at those many "future" rows in the history table.
      :type lookback_until_position: int

      :returns: A feature group object with the newly added point-in-time feature.
      :rtype: FeatureGroup



   .. py:method:: update_point_in_time_feature(feature_name, history_table_name = None, aggregation_keys = None, timestamp_key = None, historical_timestamp_key = None, expression = None, lookback_window_seconds = None, lookback_window_lag_seconds = None, lookback_count = None, lookback_until_position = None, new_feature_name = None)

      Updates an existing Point-in-Time (PiT) feature in a feature group. See `createPointInTimeFeature` for detailed semantics.

      :param feature_name: The name of the feature.
      :type feature_name: str
      :param history_table_name: The table name of the history table. If not specified, we use the current table to do a self join.
      :type history_table_name: str
      :param aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation.
      :type aggregation_keys: list
      :param timestamp_key: Name of the feature which contains the timestamp value for the PiT feature.
      :type timestamp_key: str
      :param historical_timestamp_key: Name of the feature which contains the historical timestamp.
      :type historical_timestamp_key: str
      :param expression: SQL Aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str
      :param lookback_window_seconds: If the window is specified in terms of time, the number of seconds in the past from the current time for the start of the window.
      :type lookback_window_seconds: float
      :param lookback_window_lag_seconds: Optional lag to offset the closest point for the window. If it is positive, we delay the start of the window. If it is negative, we are looking at the "future" rows in the history table.
      :type lookback_window_lag_seconds: float
      :param lookback_count: If the window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, we delay the start of the window by that many rows. If it is negative, we are looking at those many "future" rows in the history table.
      :type lookback_until_position: int
      :param new_feature_name: New name for the PiT feature.
      :type new_feature_name: str

      :returns: A feature group object with the updated point-in-time feature.
      :rtype: FeatureGroup



   .. py:method:: create_point_in_time_group(group_name, window_key, aggregation_keys, history_table_name = None, history_window_key = None, history_aggregation_keys = None, lookback_window = None, lookback_window_lag = 0, lookback_count = None, lookback_until_position = 0)

      Create a Point-in-Time Group

      :param group_name: The name of the point in time group.
      :type group_name: str
      :param window_key: Name of feature to use for ordering the rows on the source table.
      :type window_key: str
      :param aggregation_keys: List of keys to perform on the source table for the window aggregation.
      :type aggregation_keys: list
      :param history_table_name: The table to use for aggregating; if not provided, the source table will be used.
      :type history_table_name: str
      :param history_window_key: Name of feature to use for ordering the rows on the history table. If not provided, the windowKey from the source table will be used.
      :type history_window_key: str
      :param history_aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation. If not provided, the aggregationKeys from the source table will be used. Must be the same length and order as the source table's aggregationKeys.
      :type history_aggregation_keys: list
      :param lookback_window: Number of seconds in the past from the current time for the start of the window. If 0, the lookback will include all rows.
      :type lookback_window: float
      :param lookback_window_lag: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed. If it is negative, "future" rows in the history table are used.
      :type lookback_window_lag: float
      :param lookback_count: If window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed by that many rows. If it is negative, those many "future" rows in the history table are used.
      :type lookback_until_position: int

      :returns: The feature group after the point in time group has been created.
      :rtype: FeatureGroup
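
      A hedged sketch of creating a group and then generating features from it (the column names, group name, and window function names are illustrative assumptions):

      .. code-block:: python

         # Aggregate the last 30 days of history per user.
         fg = fg.create_point_in_time_group(
             group_name='user_30d',
             window_key='event_timestamp',
             aggregation_keys=['user_id'],
             lookback_window=30 * 24 * 3600,
         )
         fg = fg.generate_point_in_time_features(
             group_name='user_30d',
             columns=['purchase_amount'],
             window_functions=['SUM', 'AVG'],
         )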



   .. py:method:: generate_point_in_time_features(group_name, columns, window_functions, prefix = None)

      Generates and adds PIT features given the selected columns to aggregate over, and the operations to include.

      :param group_name: Name of the point-in-time group.
      :type group_name: str
      :param columns: List of columns to generate point-in-time features for.
      :type columns: list
      :param window_functions: List of window functions to operate on.
      :type window_functions: list
      :param prefix: Prefix for generated features, defaults to group name
      :type prefix: str

      :returns: Feature group object with newly added point-in-time features.
      :rtype: FeatureGroup



   .. py:method:: update_point_in_time_group(group_name, window_key = None, aggregation_keys = None, history_table_name = None, history_window_key = None, history_aggregation_keys = None, lookback_window = None, lookback_window_lag = None, lookback_count = None, lookback_until_position = None)

      Update Point-in-Time Group

      :param group_name: The name of the point-in-time group.
      :type group_name: str
      :param window_key: Name of feature which contains the timestamp value for the point-in-time feature.
      :type window_key: str
      :param aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation.
      :type aggregation_keys: list
      :param history_table_name: The table to use for aggregating; if not provided, the source table will be used.
      :type history_table_name: str
      :param history_window_key: Name of feature to use for ordering the rows on the history table. If not provided, the windowKey from the source table will be used.
      :type history_window_key: str
      :param history_aggregation_keys: List of keys to use for joining the historical table and performing the window aggregation. If not provided, the aggregationKeys from the source table will be used. Must be the same length and order as the source table's aggregationKeys.
      :type history_aggregation_keys: list
      :param lookback_window: Number of seconds in the past from the current time for the start of the window.
      :type lookback_window: float
      :param lookback_window_lag: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed. If it is negative, future rows in the history table are looked at.
      :type lookback_window_lag: float
      :param lookback_count: If window is specified in terms of count, the start position of the window (0 is the current row).
      :type lookback_count: int
      :param lookback_until_position: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed by that many rows. If it is negative, those many future rows in the history table are looked at.
      :type lookback_until_position: int

      :returns: The feature group after the update has been applied.
      :rtype: FeatureGroup



   .. py:method:: delete_point_in_time_group(group_name)

      Delete point in time group

      :param group_name: The name of the point in time group.
      :type group_name: str

      :returns: The feature group after the point in time group has been deleted.
      :rtype: FeatureGroup



   .. py:method:: create_point_in_time_group_feature(group_name, name, expression)

      Create point in time group feature

      :param group_name: The name of the point-in-time group.
      :type group_name: str
      :param name: The name of the feature to add to the point-in-time group.
      :type name: str
      :param expression: A SQL aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str

      :returns: The feature group after the update has been applied.
      :rtype: FeatureGroup



   .. py:method:: update_point_in_time_group_feature(group_name, name, expression)

      Update a feature's SQL expression in a point in time group

      :param group_name: The name of the point-in-time group.
      :type group_name: str
      :param name: The name of the feature in the point-in-time group to update.
      :type name: str
      :param expression: SQL aggregate expression which can convert a sequence of rows into a scalar value.
      :type expression: str

      :returns: The feature group after the update has been applied.
      :rtype: FeatureGroup



   .. py:method:: set_feature_type(feature, feature_type, project_id = None)

      Set the type of a feature in a feature group. Specify the feature group ID, feature name, and feature type, and the method will return the new column with the changes reflected.

      :param feature: The name of the feature.
      :type feature: str
      :param feature_type: The machine learning type of the data in the feature.
      :type feature_type: str
      :param project_id: Optional unique ID associated with the project.
      :type project_id: str

      :returns: The updated schema after the feature type is applied.
      :rtype: Schema
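
      A minimal sketch (the feature name and the feature type string are illustrative):

      .. code-block:: python

         schema = fg.set_feature_type(feature='user_id', feature_type='CATEGORICAL')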



   .. py:method:: concatenate_data(source_feature_group_id, merge_type = 'UNION', replace_until_timestamp = None, skip_materialize = False)

      Concatenates data from one Feature Group to another. Feature Groups can be merged if their schemas are compatible, they have the special `updateTimestampKey` column, and (if set) the `primaryKey` column. The second operand in the concatenate operation will be appended to the first operand (merge target).

      :param source_feature_group_id: The Feature Group to concatenate with the destination Feature Group.
      :type source_feature_group_id: str
      :param merge_type: `UNION` or `INTERSECTION`.
      :type merge_type: str
      :param replace_until_timestamp: The UNIX timestamp to specify the point until which we will replace data from the source Feature Group.
      :type replace_until_timestamp: int
      :param skip_materialize: If `True`, will not materialize the concatenated Feature Group.
      :type skip_materialize: bool
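
      A hedged sketch (the source feature group ID and timestamp are placeholders):

      .. code-block:: python

         # Append (UNION) rows from another feature group, replacing overlapping
         # data from the source up to the given UNIX timestamp.
         fg.concatenate_data(
             source_feature_group_id='source_fg_id',
             merge_type='UNION',
             replace_until_timestamp=1700000000,
             skip_materialize=False,
         )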



   .. py:method:: remove_concatenation_config()

      Removes the concatenation config on a destination feature group.

      :param feature_group_id: Unique identifier of the destination feature group to remove the concatenation configuration from.
      :type feature_group_id: str



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: FeatureGroup



   .. py:method:: describe()

      Describe a Feature Group.

      :param feature_group_id: A unique string identifier associated with the feature group.
      :type feature_group_id: str

      :returns: The feature group object.
      :rtype: FeatureGroup



   .. py:method:: set_indexing_config(primary_key = None, update_timestamp_key = None, lookup_keys = None)

      Sets various attributes of the feature group used for the primary key, deployment lookups, and streaming updates.

      :param primary_key: Name of the feature which defines the primary key of the feature group.
      :type primary_key: str
      :param update_timestamp_key: Name of the feature which defines the update timestamp of the feature group. Used in concatenation and primary key deduplication.
      :type update_timestamp_key: str
      :param lookup_keys: List of feature names which can be used in the lookup API to restrict the computation to a set of dataset rows. These feature names have to correspond to underlying dataset columns.
      :type lookup_keys: list
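
      A minimal sketch (the column names are placeholders):

      .. code-block:: python

         # Configure the keys used for deduplication, concatenation, and lookups.
         fg.set_indexing_config(
             primary_key='user_id',
             update_timestamp_key='updated_at',
             lookup_keys=['email'],
         )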



   .. py:method:: update(description = None)

      Modify an existing Feature Group.

      :param description: Description of the Feature Group.
      :type description: str

      :returns: Updated Feature Group object.
      :rtype: FeatureGroup



   .. py:method:: detach_from_template()

      Update a feature group to detach it from a template.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_template_bindings(template_bindings = None)

      Update the feature group template bindings for a template feature group.

      :param template_bindings: Values in these bindings override values set in the template.
      :type template_bindings: list

      :returns: Updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_python_function_bindings(python_function_bindings)

      Updates an existing Feature Group's Python function bindings from a user-provided Python Function. If a list of feature groups is supplied within the Python function bindings, we will provide DataFrames (pandas in the case of Python) with the materialized feature groups for those input feature groups as arguments to the function.

      :param python_function_bindings: List of python function arguments.
      :type python_function_bindings: List



   .. py:method:: update_python_function(python_function_name, python_function_bindings = None, cpu_size = None, memory = None, use_gpu = None, use_original_csv_names = None)

      Updates an existing Feature Group's Python function from a user-provided Python Function. If a list of feature groups is supplied within the Python function bindings, we will provide DataFrames (pandas in the case of Python) with the materialized feature groups for those input feature groups as arguments to the function.


      :param python_function_name: The name of the python function to be associated with the feature group.
      :type python_function_name: str
      :param python_function_bindings: List of python function arguments.
      :type python_function_bindings: List
      :param cpu_size: Size of the CPU for the feature group python function.
      :type cpu_size: CPUSize
      :param memory: Memory (in GB) for the feature group python function.
      :type memory: MemorySize
      :param use_gpu: Whether the feature group needs a gpu or not. Otherwise default to CPU.
      :type use_gpu: bool
      :param use_original_csv_names: If enabled, it uses the original column names for input feature groups from CSV datasets.
      :type use_original_csv_names: bool
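
      A hedged sketch (the function name is a placeholder; resource sizes are omitted here and would be passed as ``CPUSize``/``MemorySize`` values if needed):

      .. code-block:: python

         # Re-point the feature group at a registered Python function.
         fg.update_python_function(
             python_function_name='my_transform_fn',
             use_gpu=False,
         )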



   .. py:method:: update_sql_definition(sql)

      Updates the SQL statement for a feature group.

      :param sql: The input SQL statement for the feature group.
      :type sql: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_dataset_feature_expression(feature_expression)

      Updates the SQL feature expression for a Dataset FeatureGroup's custom features

      :param feature_expression: The input SQL statement for the feature group.
      :type feature_expression: str

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_version_limit(version_limit)

      Updates the version limit for the feature group.

      :param version_limit: The maximum number of versions permitted for the feature group. Once this limit is exceeded, the oldest versions will be purged in a First-In-First-Out (FIFO) order.
      :type version_limit: int

      :returns: The updated feature group.
      :rtype: FeatureGroup



   .. py:method:: update_feature(name, select_expression = None, new_name = None)

      Modifies an existing feature in a feature group.

      :param name: Name of the feature to be updated.
      :type name: str
      :param select_expression: SQL statement for modifying the feature.
      :type select_expression: str
      :param new_name: New name of the feature.
      :type new_name: str

      :returns: Updated feature group object.
      :rtype: FeatureGroup



   .. py:method:: list_exports()

      Lists all of the feature group exports for the feature group

      :param feature_group_id: Unique identifier of the feature group
      :type feature_group_id: str

      :returns: List of feature group exports
      :rtype: list[FeatureGroupExport]



   .. py:method:: set_modifier_lock(locked = True)

      Lock a feature group to prevent modification.

      :param locked: Whether to lock the feature group to prevent modification (True) or unlock it (False).
      :type locked: bool



   .. py:method:: list_modifiers()

      List the users who can modify a given feature group.

      :param feature_group_id: Unique string identifier of the feature group.
      :type feature_group_id: str

      :returns: Information about the modification lock status and groups/organizations added to the feature group.
      :rtype: ModificationLockInfo



   .. py:method:: add_user_to_modifiers(email)

      Adds a user to a feature group.

      :param email: The email address of the user to be added.
      :type email: str



   .. py:method:: add_organization_group_to_modifiers(organization_group_id)

      Add OrganizationGroup to a feature group modifiers list

      :param organization_group_id: Unique string identifier of the organization group.
      :type organization_group_id: str



   .. py:method:: remove_user_from_modifiers(email)

      Removes a user from a specified feature group.

      :param email: The email address of the user to be removed.
      :type email: str



   .. py:method:: remove_organization_group_from_modifiers(organization_group_id)

      Removes an OrganizationGroup from a feature group modifiers list

      :param organization_group_id: The unique ID associated with the organization group.
      :type organization_group_id: str



   .. py:method:: delete_feature(name)

      Removes a feature from the feature group.

      :param name: Name of the feature to be deleted.
      :type name: str

      :returns: Updated feature group object.
      :rtype: FeatureGroup



   .. py:method:: delete()

      Deletes a Feature Group.

      :param feature_group_id: Unique string identifier for the feature group to be removed.
      :type feature_group_id: str



   .. py:method:: create_version(variable_bindings = None)

      Creates a snapshot for a specified feature group. Triggers materialization of the feature group. The new version of the feature group is created after it has materialized.

      :param variable_bindings: Dictionary defining variable bindings that override parent feature group values.
      :type variable_bindings: dict

      :returns: A feature group version.
      :rtype: FeatureGroupVersion
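
      A minimal sketch of a materialization round trip (``load_as_pandas`` is documented further below):

      .. code-block:: python

         # Snapshot the feature group, wait for it to materialize, then load it.
         version = fg.create_version()
         fg.wait_for_materialization(timeout=7200)
         df = fg.load_as_pandas()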



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      Retrieves a list of all feature group versions for the specified feature group.

      :param limit: The maximum length of the returned versions.
      :type limit: int
      :param start_after_version: Results will start after this version.
      :type start_after_version: str

      :returns: A list of feature group versions.
      :rtype: list[FeatureGroupVersion]



   .. py:method:: set_export_connector_config(feature_group_export_config = None)

      Sets the export config for the given feature group.

      :param feature_group_export_config: The export config to be set for the given feature group.
      :type feature_group_export_config: FeatureGroupExportConfig



   .. py:method:: set_export_on_materialization(enable)

      Can be used to enable or disable exporting feature group data to the export connector associated with the feature group.

      :param enable: If True, exporting the feature group to the connector will be enabled; if False, it will be disabled.
      :type enable: bool



   .. py:method:: create_template(name, template_sql, template_variables, description = None, template_bindings = None, should_attach_feature_group_to_template = False)

      Create a feature group template.

      :param name: User-friendly name for this feature group template.
      :type name: str
      :param template_sql: The template SQL that will be resolved by applying values from the template variables to generate SQL for a feature group.
      :type template_sql: str
      :param template_variables: The template variables for resolving the template.
      :type template_variables: list
      :param description: Description of this feature group template.
      :type description: str
      :param template_bindings: If the feature group will be attached to the newly created template, set these variable bindings on that feature group.
      :type template_bindings: list
      :param should_attach_feature_group_to_template: Set to `True` to convert the feature group to a template feature group and attach it to the newly created template.
      :type should_attach_feature_group_to_template: bool

      :returns: The created feature group template.
      :rtype: FeatureGroupTemplate



   .. py:method:: suggest_template_for()

      Suggests values for a feature group template, based on a feature group.

      :param feature_group_id: Unique identifier associated with the feature group to use for suggesting values to use in the template.
      :type feature_group_id: str

      :returns: The suggested feature group template.
      :rtype: FeatureGroupTemplate



   .. py:method:: get_recent_streamed_data()

      Returns data recently streamed to a streaming feature group.

      :param feature_group_id: Unique string identifier associated with the feature group.
      :type feature_group_id: str



   .. py:method:: append_data(streaming_token, data)

      Appends new data into the feature group for a given lookup key recordId.

      :param streaming_token: The streaming token for authenticating requests.
      :type streaming_token: str
      :param data: The data to record as a JSON object.
      :type data: dict



   .. py:method:: append_multiple_data(streaming_token, data)

      Appends multiple rows of new data into the feature group for a given lookup key recordId.

      :param streaming_token: Streaming token for authenticating requests.
      :type streaming_token: str
      :param data: Data to record, as a list of JSON objects.
      :type data: list



   .. py:method:: upsert_data(data, streaming_token = None, blobs = None)

      Updates the data in the feature group for a given lookup key record ID if the record ID is found; otherwise, inserts new data into the feature group.

      :param data: The data to record, in JSON format.
      :type data: dict
      :param streaming_token: Optional streaming token for authenticating requests if upserting to streaming FG.
      :type streaming_token: str
      :param blobs: A dictionary of binary data used to populate file fields in the data being upserted to the streaming FG.
      :type blobs: dict

      :returns: The feature group row that was upserted.
      :rtype: FeatureGroupRow
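
      A hedged sketch (the column names and the streaming token are placeholders):

      .. code-block:: python

         # Insert-or-update a single row keyed by the primary key inside `data`.
         row = fg.upsert_data(
             data={'user_id': 'u_1', 'amount': 12.5},
             streaming_token='my_streaming_token',  # only needed for streaming FGs
         )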



   .. py:method:: delete_data(primary_key)

      Deletes a row from the feature group given the primary key

      :param primary_key: The primary key value for which to delete the feature group row
      :type primary_key: str



   .. py:method:: get_data(primary_key = None, num_rows = None)

      Gets the feature group rows for online updatable feature groups.

      If primary_key is set, the row corresponding to it is returned.
      If num_rows is set, at most num_rows of the most recently updated rows are returned.


      :param primary_key: The primary key value for which to retrieve the feature group row (only for online feature groups).
      :type primary_key: str
      :param num_rows: Maximum number of rows to return from the feature group
      :type num_rows: int

      :returns: A list of feature group rows.
      :rtype: list[FeatureGroupRow]



   .. py:method:: get_natural_language_explanation(feature_group_version = None, model_id = None)

      Returns the saved natural language explanation of an artifact with a given ID. The artifact can be a Feature Group, a Feature Group Version, or a Model.

      :param feature_group_version: A unique string identifier associated with the Feature Group Version.
      :type feature_group_version: str
      :param model_id: A unique string identifier associated with the Model.
      :type model_id: str

      :returns: The object containing natural language explanation(s) as field(s).
      :rtype: NaturalLanguageExplanation



   .. py:method:: generate_natural_language_explanation(feature_group_version = None, model_id = None)

      Generates a natural language explanation of an artifact with a given ID. The artifact can be a Feature Group, a Feature Group Version, or a Model.

      :param feature_group_version: A unique string identifier associated with the Feature Group Version.
      :type feature_group_version: str
      :param model_id: A unique string identifier associated with the Model.
      :type model_id: str

      :returns: The object containing natural language explanation(s) as field(s).
      :rtype: NaturalLanguageExplanation



   .. py:method:: wait_for_dataset(timeout = 7200)

      A waiting call until the feature group's dataset, if any, is ready for use.

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is said to have timed out. The default value is 7200 seconds.
      :type timeout: int



   .. py:method:: wait_for_upload(timeout = 7200)

      Waits for a feature group created from a dataframe to be ready for materialization and version creation.

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is said to have timed out. The default value is 7200 seconds.
      :type timeout: int



   .. py:method:: wait_for_materialization(timeout = 7200)

      A waiting call until the feature group is materialized.

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is said to have timed out. The default value is 7200 seconds.
      :type timeout: int



   .. py:method:: wait_for_streaming_ready(timeout = 600)

      Waits for the feature group indexing config to be applied for streaming

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is said to have timed out. The default value is 600 seconds.
      :type timeout: int



   .. py:method:: get_status(streaming_status = False)

      Gets the status of the feature group.

      :returns: A string describing the status of a feature group (pending, complete, etc.).
      :rtype: str



   .. py:method:: load_as_pandas()

      Loads the feature group into a Python pandas DataFrame.

      :returns: A pandas dataframe with annotations and text_snippet columns.
      :rtype: DataFrame



   .. py:method:: load_as_pandas_documents(doc_id_column = 'doc_id', document_column = 'page_infos')

      Loads a feature group with documents data into a pandas dataframe.

      :param doc_id_column: The name of the feature / column containing the document ID.
      :type doc_id_column: str
      :param document_column: The name of the feature / column which either contains the document data itself or page infos with path to remotely stored documents. This column will be replaced with the extracted document data.
      :type document_column: str

      :returns: A pandas dataframe containing the extracted document data.
      :rtype: DataFrame
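
      A minimal sketch using the default column names documented above:

      .. code-block:: python

         df = fg.load_as_pandas_documents(
             doc_id_column='doc_id',
             document_column='page_infos',
         )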



   .. py:method:: describe_dataset()

      Displays the dataset attached to a feature group.

      :returns: A dataset object with all the relevant information about the dataset.
      :rtype: Dataset



   .. py:method:: materialize()

      Materializes the feature group's latest changes at the time of the API call. Materialization is skipped if nothing has changed since the current latest version.

      :returns: A feature group object with the latest changes materialized.
      :rtype: FeatureGroup



.. py:class:: FeatureGroupDocument(client, featureGroupId=None, docId=None, status=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A document of a feature group.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: The ID of the feature group this row belongs to.
   :type featureGroupId: str
   :param docId: Unique document id
   :type docId: str
   :param status: The status of the document processing
   :type status: str


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: doc_id
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupExport(client, featureGroupExportId=None, failedWrites=None, totalWrites=None, featureGroupVersion=None, connectorType=None, outputLocation=None, fileFormat=None, databaseConnectorId=None, objectName=None, writeMode=None, databaseFeatureMapping=None, idColumn=None, status=None, createdAt=None, exportCompletedAt=None, additionalIdColumns=None, error=None, databaseOutputError=None, projectConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Feature Group Export Job.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupExportId: Unique identifier for this export.
   :type featureGroupExportId: str
   :param failedWrites: Number of failed writes.
   :type failedWrites: int
   :param totalWrites: Total number of writes.
   :type totalWrites: int
   :param featureGroupVersion: Version of the feature group being exported.
   :type featureGroupVersion: str
   :param connectorType: The type of connector
   :type connectorType: str
   :param outputLocation: File Connector location the feature group is being written to.
   :type outputLocation: str
   :param fileFormat: File format being written to `output_location`.
   :type fileFormat: str
   :param databaseConnectorId: Database connector ID used.
   :type databaseConnectorId: str
   :param objectName: Database connector's object to write to.
   :type objectName: str
   :param writeMode: `UPSERT` or `INSERT` for writing to the database connector.
   :type writeMode: str
   :param databaseFeatureMapping: Column/feature pairs mapping the features to the database columns.
   :type databaseFeatureMapping: dict
   :param idColumn: ID column to use as the upsert key.
   :type idColumn: str
   :param status: Current status of the export.
   :type status: str
   :param createdAt: Timestamp at which the export was created (ISO-8601 format).
   :type createdAt: str
   :param exportCompletedAt: Timestamp at which the export completed (ISO-8601 format).
   :type exportCompletedAt: str
   :param additionalIdColumns: For database connectors which support it, additional ID columns to use as a complex key for upserting.
   :type additionalIdColumns: list[str]
   :param error: If `status` is `FAILED`, this field will be populated with an error.
   :type error: str
   :param databaseOutputError: If `True`, there were errors reported by the database connector while writing.
   :type databaseOutputError: bool
   :param projectConfig: Project config for this feature group.
   :type projectConfig: ProjectConfig


   .. py:attribute:: feature_group_export_id
      :value: None



   .. py:attribute:: failed_writes
      :value: None



   .. py:attribute:: total_writes
      :value: None



   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: connector_type
      :value: None



   .. py:attribute:: output_location
      :value: None



   .. py:attribute:: file_format
      :value: None



   .. py:attribute:: database_connector_id
      :value: None



   .. py:attribute:: object_name
      :value: None



   .. py:attribute:: write_mode
      :value: None



   .. py:attribute:: database_feature_mapping
      :value: None



   .. py:attribute:: id_column
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: export_completed_at
      :value: None



   .. py:attribute:: additional_id_columns
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: database_output_error
      :value: None



   .. py:attribute:: project_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: get_feature_group_version_export_download_url()

      Get a link to download the feature group version.

      :param feature_group_export_id: Unique identifier of the Feature Group Export to get a signed URL for.
      :type feature_group_export_id: str

      :returns: Instance containing the download URL and expiration time for the Feature Group Export.
      :rtype: FeatureGroupExportDownloadUrl
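
      A hedged sketch (assumes ``export`` is an existing ``FeatureGroupExport``, for example one returned by ``FeatureGroup.list_exports()``):

      .. code-block:: python

         # Wait for the export to finish, then fetch a signed download URL.
         export.wait_for_export(timeout=3600)
         url_info = export.get_feature_group_version_export_download_url()
         print(url_info.download_url, url_info.expires_at)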



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: FeatureGroupExport



   .. py:method:: describe()

      Describes a feature group export.

      :param feature_group_export_id: Unique identifier of the feature group export.
      :type feature_group_export_id: str

      :returns: The feature group export object.
      :rtype: FeatureGroupExport



   .. py:method:: get_connector_errors()

      Returns a stream containing the write errors of the feature group export database connection, if any writes failed to the database connector.

      :param feature_group_export_id: Unique identifier of the feature group export to get the errors for.
      :type feature_group_export_id: str



   .. py:method:: wait_for_results(timeout=3600)

      A waiting call until the feature group export is created.

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is said to have timed out.
      :type timeout: int



   .. py:method:: wait_for_export(timeout=3600)

      A waiting call until the feature group export is created.

      :param timeout: The waiting time given to the call to finish; if it doesn't finish by the allocated time, the call is said to have timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the feature group export.

      :returns: A string describing the status of a feature group export (pending, complete, etc.).
      :rtype: str



.. py:class:: FeatureGroupExportConfig(client, outputLocation=None, fileFormat=None, databaseConnectorId=None, objectName=None, writeMode=None, databaseFeatureMapping=None, idColumn=None, additionalIdColumns=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Export configuration (file connector or database connector information) for feature group exports.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param outputLocation: The File Connector location to which the feature group is being written.
   :type outputLocation: str
   :param fileFormat: The file format being written to output_location.
   :type fileFormat: str
   :param databaseConnectorId: The unique string identifier of the database connector used.
   :type databaseConnectorId: str
   :param objectName: The object in the database connector to which the feature group is being written.
   :type objectName: str
   :param writeMode: UPSERT or INSERT for writing to the database connector.
   :type writeMode: str
   :param databaseFeatureMapping: The column/feature pairs mapping the features to the database columns.
   :type databaseFeatureMapping: dict
   :param idColumn: The id column to use as the upsert key.
   :type idColumn: str
   :param additionalIdColumns: For database connectors which support it, additional ID columns to use as a complex key for upserting.
   :type additionalIdColumns: list


   .. py:attribute:: output_location
      :value: None



   .. py:attribute:: file_format
      :value: None



   .. py:attribute:: database_connector_id
      :value: None



   .. py:attribute:: object_name
      :value: None



   .. py:attribute:: write_mode
      :value: None



   .. py:attribute:: database_feature_mapping
      :value: None



   .. py:attribute:: id_column
      :value: None



   .. py:attribute:: additional_id_columns
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupExportDownloadUrl(client, downloadUrl=None, expiresAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Feature Group Export Download Url, which is used to download the feature group version

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param downloadUrl: The URL of the download location.
   :type downloadUrl: str
   :param expiresAt: String representation of the ISO-8601 datetime when the URL expires.
   :type expiresAt: str


   .. py:attribute:: download_url
      :value: None



   .. py:attribute:: expires_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupLineage(client, nodes=None, connections=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Directed acyclic graph of feature group lineage for all feature groups in a project

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param nodes: A list of nodes in the graph containing feature groups and datasets
   :type nodes: list<dict>
   :param connections: A list of connections in the graph between nodes
   :type connections: list<dict>


   .. py:attribute:: nodes
      :value: None



   .. py:attribute:: connections
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupRefreshExportConfig(client, connectorType=None, location=None, exportFileFormat=None, additionalIdColumns=None, databaseFeatureMapping=None, externalConnectionId=None, idColumn=None, objectName=None, writeMode=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Feature Group Connector Export Config outlines the export configuration for a feature group.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param connectorType: The type of connector used for the feature group export
   :type connectorType: str
   :param location: The file connector location of the feature group export
   :type location: str
   :param exportFileFormat: The file format of the feature group export
   :type exportFileFormat: str
   :param additionalIdColumns: Additional id columns to use for upsert operations
   :type additionalIdColumns: list
   :param databaseFeatureMapping: The mapping of feature names to database columns
   :type databaseFeatureMapping: dict
   :param externalConnectionId: The unique identifier of the external connection to write to
   :type externalConnectionId: str
   :param idColumn: The column to use as the id column for upsert operations
   :type idColumn: str
   :param objectName: The name of the object to write to
   :type objectName: str
   :param writeMode: The write mode to use for the export
   :type writeMode: str


   .. py:attribute:: connector_type
      :value: None



   .. py:attribute:: location
      :value: None



   .. py:attribute:: export_file_format
      :value: None



   .. py:attribute:: additional_id_columns
      :value: None



   .. py:attribute:: database_feature_mapping
      :value: None



   .. py:attribute:: external_connection_id
      :value: None



   .. py:attribute:: id_column
      :value: None



   .. py:attribute:: object_name
      :value: None



   .. py:attribute:: write_mode
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupRow(client, featureGroupId=None, primaryKey=None, createdAt=None, updatedAt=None, contents=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A row of a feature group.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: The ID of the feature group this row belongs to.
   :type featureGroupId: str
   :param primaryKey: Value of the primary key for this row.
   :type primaryKey: str
   :param createdAt: The timestamp this feature group row was created in ISO-8601 format.
   :type createdAt: str
   :param updatedAt: The timestamp when this feature group row was last updated in ISO-8601 format.
   :type updatedAt: str
   :param contents: A dictionary of feature names and values for this row.
   :type contents: dict


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: primary_key
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: contents
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupRowProcess(client, featureGroupId=None, deploymentId=None, primaryKeyValue=None, featureGroupRowProcessId=None, createdAt=None, updatedAt=None, startedAt=None, completedAt=None, timeoutAt=None, retriesRemaining=None, totalAttemptsAllowed=None, status=None, error=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A feature group row process

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: The ID of the feature group that the processed row belongs to.
   :type featureGroupId: str
   :param deploymentId: The ID of the deployment that processed this row.
   :type deploymentId: str
   :param primaryKeyValue: Value of the primary key for this row.
   :type primaryKeyValue: str
   :param featureGroupRowProcessId: The ID of the feature group row process.
   :type featureGroupRowProcessId: str
   :param createdAt: The timestamp this feature group row was created in ISO-8601 format.
   :type createdAt: str
   :param updatedAt: The timestamp when this feature group row was last updated in ISO-8601 format.
   :type updatedAt: str
   :param startedAt: The timestamp when this feature group row process was started in ISO-8601 format.
   :type startedAt: str
   :param completedAt: The timestamp when this feature group row was completed.
   :type completedAt: str
   :param timeoutAt: The time the feature group row process will timeout.
   :type timeoutAt: str
   :param retriesRemaining: The number of retries remaining for this feature group row process.
   :type retriesRemaining: int
   :param totalAttemptsAllowed: The total number of attempts allowed for this feature group row process.
   :type totalAttemptsAllowed: int
   :param status: The status of the feature group row process.
   :type status: str
   :param error: The error message if the status is FAILED.
   :type error: str


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: primary_key_value
      :value: None



   .. py:attribute:: feature_group_row_process_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: started_at
      :value: None



   .. py:attribute:: completed_at
      :value: None



   .. py:attribute:: timeout_at
      :value: None



   .. py:attribute:: retries_remaining
      :value: None



   .. py:attribute:: total_attempts_allowed
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: wait_for_process(timeout=1200)

      A waiting call until the feature group row process is complete.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the feature group row process.

      :returns: A string describing the status of the feature group row process
      :rtype: str
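
      A minimal sketch, assuming ``row_process`` is a ``FeatureGroupRowProcess``
      instance obtained elsewhere; the status string compared against is illustrative.

      .. code-block:: python

         # Wait for the row process to finish, then check how it ended.
         row_process.wait_for_process(timeout=1200)
         if row_process.get_status() == 'FAILED':
             # The error attribute carries the failure message when the status is FAILED.
             print(row_process.error)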



.. py:class:: FeatureGroupRowProcessLogs(client, logs=None, featureGroupId=None, deploymentId=None, primaryKeyValue=None, featureGroupRowProcessId=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs for the feature group row process.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param logs: The logs for both stdout and stderr of the step
   :type logs: str
   :param featureGroupId: The ID of the feature group that the processed row belongs to.
   :type featureGroupId: str
   :param deploymentId: The ID of the deployment that processed this row.
   :type deploymentId: str
   :param primaryKeyValue: Value of the primary key for this row.
   :type primaryKeyValue: str
   :param featureGroupRowProcessId: The ID of the feature group row process.
   :type featureGroupRowProcessId: str


   .. py:attribute:: logs
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: primary_key_value
      :value: None



   .. py:attribute:: feature_group_row_process_id
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupRowProcessSummary(client, totalProcesses=None, pendingProcesses=None, processingProcesses=None, completeProcesses=None, failedProcesses=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A summary of the feature group processes for a deployment.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param totalProcesses: The total number of processes
   :type totalProcesses: int
   :param pendingProcesses: The number of pending processes
   :type pendingProcesses: int
   :param processingProcesses: The number of processes currently processing
   :type processingProcesses: int
   :param completeProcesses: The number of complete processes
   :type completeProcesses: int
   :param failedProcesses: The number of failed processes
   :type failedProcesses: int


   .. py:attribute:: total_processes
      :value: None



   .. py:attribute:: pending_processes
      :value: None



   .. py:attribute:: processing_processes
      :value: None



   .. py:attribute:: complete_processes
      :value: None



   .. py:attribute:: failed_processes
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupTemplate(client, featureGroupTemplateId=None, description=None, featureGroupId=None, isSystemTemplate=None, name=None, templateSql=None, templateVariables=None, createdAt=None, updatedAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A template for creating feature groups.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupTemplateId: The unique identifier for this feature group template.
   :type featureGroupTemplateId: str
   :param description: A user-friendly text description of this feature group template.
   :type description: str
   :param featureGroupId: The unique identifier for the feature group used to create this template.
   :type featureGroupId: str
   :param isSystemTemplate: True if this is a system template returned from a user organization.
   :type isSystemTemplate: bool
   :param name: The user-friendly name of this feature group template.
   :type name: str
   :param templateSql: SQL that can include variables which will be replaced by values from the template config to resolve this template SQL into a valid SQL query for a feature group.
   :type templateSql: str
   :param templateVariables: A map from template variable names to parameters describing how to replace those variables with values (e.g., the values themselves and metadata on how to resolve them).
   :type templateVariables: dict
   :param createdAt: When the feature group template was created.
   :type createdAt: str
   :param updatedAt: When the feature group template was updated.
   :type updatedAt: str


   .. py:attribute:: feature_group_template_id
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: is_system_template
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: template_sql
      :value: None



   .. py:attribute:: template_variables
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: delete()

      Delete an existing feature group template.

      :param feature_group_template_id: Unique string identifier associated with the feature group template.
      :type feature_group_template_id: str



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: FeatureGroupTemplate



   .. py:method:: describe()

      Describe a Feature Group Template.

      :param feature_group_template_id: The unique identifier of a feature group template.
      :type feature_group_template_id: str

      :returns: The feature group template object.
      :rtype: FeatureGroupTemplate



   .. py:method:: update(template_sql = None, template_variables = None, description = None, name = None)

      Update a feature group template.

      :param template_sql: If provided, the new value to use for the template SQL.
      :type template_sql: str
      :param template_variables: If provided, the new value to use for the template variables.
      :type template_variables: list
      :param description: Description of this feature group template.
      :type description: str
      :param name: User-friendly name for this feature group template.
      :type name: str

      :returns: The updated feature group template.
      :rtype: FeatureGroupTemplate



   .. py:method:: preview_resolution(template_bindings = None, template_sql = None, template_variables = None, should_validate = True)

      Resolve template sql using template variables and template bindings.

      :param template_bindings: Values to override the template variable values specified by the template.
      :type template_bindings: list
      :param template_sql: If specified, use this as the template SQL instead of the feature group template's SQL.
      :type template_sql: str
      :param template_variables: Template variables to use. If a template is provided, this overrides the template's template variables.
      :type template_variables: list
      :param should_validate: If true, validates the resolved SQL.
      :type should_validate: bool

      :returns: The resolved template
      :rtype: ResolvedFeatureGroupTemplate
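
      A minimal sketch, assuming ``template`` is a ``FeatureGroupTemplate`` instance;
      the SQL, binding format, and variable names below are illustrative placeholders,
      not a documented schema.

      .. code-block:: python

         # Preview how the template SQL resolves with a candidate binding and validate it.
         resolved = template.preview_resolution(
             template_bindings=[{'name': 'source_table', 'value': 'my_table'}],
             should_validate=True,
         )

         # Once the resolution looks right, persist an updated description.
         template = template.update(description='Curated join template')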



.. py:class:: FeatureGroupTemplateVariableOptions(client, templateVariableOptions=None, userFeedback=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Feature Group Template Variable Options

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param templateVariableOptions: List of values we can select for different template variables.
   :type templateVariableOptions: list[dict]
   :param userFeedback: List of additional information regarding variable options for the user.
   :type userFeedback: list[str]


   .. py:attribute:: template_variable_options
      :value: None



   .. py:attribute:: user_feedback
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureGroupVersion(client, featureGroupVersion=None, featureGroupId=None, sql=None, sourceTables=None, sourceDatasetVersions=None, createdAt=None, status=None, error=None, deployable=None, cpuSize=None, memory=None, useOriginalCsvNames=None, pythonFunctionBindings=None, indexingConfigWarningMsg=None, materializationStartedAt=None, materializationCompletedAt=None, columns=None, templateBindings=None, features={}, pointInTimeGroups={}, codeSource={}, annotationConfig={}, indexingConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A materialized version of a feature group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupVersion: The unique identifier for this materialized version of feature group.
   :type featureGroupVersion: str
   :param featureGroupId: The unique identifier of the feature group this version belongs to.
   :type featureGroupId: str
   :param sql: The SQL definition that creates this feature group.
   :type sql: str
   :param sourceTables: The source tables for this feature group.
   :type sourceTables: list[str]
   :param sourceDatasetVersions: The dataset version ids for this feature group version.
   :type sourceDatasetVersions: list[str]
   :param createdAt: The timestamp at which the feature group version was created.
   :type createdAt: str
   :param status: The current status of the feature group version.
   :type status: str
   :param error: Relevant error if the status is FAILED.
   :type error: str
   :param deployable: Whether the feature group is deployable or not.
   :type deployable: bool
   :param cpuSize: Cpu size specified for the python feature group.
   :type cpuSize: str
   :param memory: Memory in GB specified for the python feature group.
   :type memory: int
   :param useOriginalCsvNames: If true, the feature group will use the original column names in the source dataset.
   :type useOriginalCsvNames: bool
   :param pythonFunctionBindings: Config specifying variable names, types, and values to use when resolving a Python feature group.
   :type pythonFunctionBindings: list
   :param indexingConfigWarningMsg: The warning message related to indexing keys.
   :type indexingConfigWarningMsg: str
   :param materializationStartedAt: The timestamp at which the feature group materialization started.
   :type materializationStartedAt: str
   :param materializationCompletedAt: The timestamp at which the feature group materialization completed.
   :type materializationCompletedAt: str
   :param columns: List of resolved columns.
   :type columns: list[Feature]
   :param templateBindings: Template variable bindings used for resolving the template.
   :type templateBindings: list
   :param features: List of features.
   :type features: Feature
   :param pointInTimeGroups: List of Point In Time Groups
   :type pointInTimeGroups: PointInTimeGroup
   :param codeSource: If a python feature group, information on the source code
   :type codeSource: CodeSource
   :param annotationConfig: The annotations config for the feature group.
   :type annotationConfig: AnnotationConfig
   :param indexingConfig: The indexing config for the feature group.
   :type indexingConfig: IndexingConfig


   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: sql
      :value: None



   .. py:attribute:: source_tables
      :value: None



   .. py:attribute:: source_dataset_versions
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: deployable
      :value: None



   .. py:attribute:: cpu_size
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: use_original_csv_names
      :value: None



   .. py:attribute:: python_function_bindings
      :value: None



   .. py:attribute:: indexing_config_warning_msg
      :value: None



   .. py:attribute:: materialization_started_at
      :value: None



   .. py:attribute:: materialization_completed_at
      :value: None



   .. py:attribute:: columns
      :value: None



   .. py:attribute:: template_bindings
      :value: None



   .. py:attribute:: features


   .. py:attribute:: point_in_time_groups


   .. py:attribute:: code_source


   .. py:attribute:: annotation_config


   .. py:attribute:: indexing_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: create_snapshot_feature_group(table_name)

      Creates a Snapshot Feature Group corresponding to a specific Feature Group version.

      :param table_name: Name for the newly created Snapshot Feature Group table. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type table_name: str

      :returns: Feature Group corresponding to the newly created Snapshot.
      :rtype: FeatureGroup
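
      A minimal sketch, assuming ``fg_version`` is a materialized ``FeatureGroupVersion``;
      the table name is a placeholder.

      .. code-block:: python

         # Freeze this exact version's data into a new Snapshot Feature Group.
         snapshot_fg = fg_version.create_snapshot_feature_group(
             table_name='sales_snapshot_2024_01'
         )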



   .. py:method:: export_to_file_connector(location, export_file_format, overwrite = False)

      Export Feature group to File Connector.

      :param location: Cloud file location to export to.
      :type location: str
      :param export_file_format: Enum string specifying the file format to export to.
      :type export_file_format: str
      :param overwrite: If true and a file exists at this location, this process will overwrite the file.
      :type overwrite: bool

      :returns: The FeatureGroupExport instance.
      :rtype: FeatureGroupExport
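
      A minimal sketch, assuming ``fg_version`` is a ``FeatureGroupVersion`` and
      the location is a verified File Connector path; the values are placeholders.

      .. code-block:: python

         # Export the materialized data as CSV files to cloud storage, replacing any existing file.
         export = fg_version.export_to_file_connector(
             location='s3://your-bucket/exports/sales/',
             export_file_format='CSV',
             overwrite=True,
         )
         export.wait_for_export()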



   .. py:method:: export_to_database_connector(database_connector_id, object_name, write_mode, database_feature_mapping, id_column = None, additional_id_columns = None)

      Export Feature group to Database Connector.

      :param database_connector_id: Unique string identifier for the Database Connector to export to.
      :type database_connector_id: str
      :param object_name: Name of the database object to write to.
      :type object_name: str
      :param write_mode: Enum string indicating whether to use INSERT or UPSERT.
      :type write_mode: str
      :param database_feature_mapping: Key/value pair JSON object of "database connector column" -> "feature name" pairs.
      :type database_feature_mapping: dict
      :param id_column: Required if write_mode is UPSERT. Indicates which database column should be used as the lookup key.
      :type id_column: str
      :param additional_id_columns: For database connectors which support it, additional ID columns to use as a complex key for upserting.
      :type additional_id_columns: list

      :returns: The FeatureGroupExport instance.
      :rtype: FeatureGroupExport
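
      A minimal sketch, assuming ``fg_version`` is a ``FeatureGroupVersion`` and a
      Database Connector is already configured; the identifiers and mapping below
      are placeholders.

      .. code-block:: python

         # Upsert the version into a database table, keyed on a single id column.
         export = fg_version.export_to_database_connector(
             database_connector_id='database_connector_id',
             object_name='analytics.customer_features',
             write_mode='UPSERT',
             database_feature_mapping={'customer_id': 'customer_id', 'ltv': 'lifetime_value'},
             id_column='customer_id',
         )
         export.wait_for_export()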



   .. py:method:: export_to_console(export_file_format)

      Export Feature group to console.

      :param export_file_format: File format to export to.
      :type export_file_format: str

      :returns: The FeatureGroupExport instance.
      :rtype: FeatureGroupExport



   .. py:method:: delete()

      Deletes a Feature Group Version.

      :param feature_group_version: String identifier for the feature group version to be removed.
      :type feature_group_version: str



   .. py:method:: get_materialization_logs(stdout = False, stderr = False)

      Returns logs for a materialized feature group version.

      :param stdout: Set to True to get info logs.
      :type stdout: bool
      :param stderr: Set to True to get error logs.
      :type stderr: bool

      :returns: A function logs object.
      :rtype: FunctionLogs



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: FeatureGroupVersion



   .. py:method:: describe()

      Describe a feature group version.

      :param feature_group_version: The unique identifier associated with the feature group version.
      :type feature_group_version: str

      :returns: The feature group version.
      :rtype: FeatureGroupVersion



   .. py:method:: get_metrics(selected_columns = None, include_charts = False, include_statistics = True)

      Get metrics for a specific feature group version.

      :param selected_columns: A list of columns to order first.
      :type selected_columns: List
      :param include_charts: A flag indicating whether charts should be included in the response. Default is false.
      :type include_charts: bool
      :param include_statistics: A flag indicating whether statistics should be included in the response. Default is true.
      :type include_statistics: bool

      :returns: The metrics for the specified feature group version.
      :rtype: DataMetrics
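
      A minimal sketch, assuming ``fg_version`` is a materialized ``FeatureGroupVersion``;
      the column names are placeholders.

      .. code-block:: python

         # Pull column-level statistics, listing two columns of interest first.
         metrics = fg_version.get_metrics(
             selected_columns=['customer_id', 'ltv'],
             include_statistics=True,
         )
         print(metrics.to_dict())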



   .. py:method:: get_logs()

      Retrieves the feature group materialization logs.

      :param feature_group_version: The unique version ID of the feature group version.
      :type feature_group_version: str

      :returns: The logs for the specified feature group version.
      :rtype: FeatureGroupVersionLogs



   .. py:method:: wait_for_results(timeout=3600)

      A waiting call until the feature group version is materialized.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: wait_for_materialization(timeout=3600)

      A waiting call until feature group version is materialized.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the feature group version.

      :returns: A string describing the status of a feature group version (pending, complete, etc.).
      :rtype: str



   .. py:method:: _download_avro_file(file_part, tmp_dir, part_index)


   .. py:method:: load_as_pandas(max_workers=10)

      Loads the feature group version into a pandas dataframe.

      :param max_workers: The number of threads.
      :type max_workers: int

      :returns: A pandas dataframe displaying the data in the feature group version.
      :rtype: DataFrame



   .. py:method:: load_as_pandas_documents(doc_id_column = 'doc_id', document_column = 'page_infos', max_workers=10)

      Loads a feature group with documents data into a pandas dataframe.

      :param doc_id_column: The name of the feature / column containing the document ID.
      :type doc_id_column: str
      :param document_column: The name of the feature / column which either contains the document data itself or page infos with path to remotely stored documents. This column will be replaced with the extracted document data.
      :type document_column: str
      :param max_workers: The number of threads.
      :type max_workers: int

      :returns: A pandas dataframe containing the extracted document data.
      :rtype: DataFrame
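
      A minimal sketch, assuming ``fg_version`` is a materialized ``FeatureGroupVersion``
      and pandas is installed; the worker count and column names are illustrative.

      .. code-block:: python

         # Download the materialized data into a pandas DataFrame using 10 threads.
         df = fg_version.load_as_pandas(max_workers=10)
         print(df.shape)

         # For a feature group holding documents, extract the document contents instead.
         docs_df = fg_version.load_as_pandas_documents(
             doc_id_column='doc_id',
             document_column='page_infos',
         )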



.. py:class:: FeatureGroupVersionLogs(client, logs=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs from feature group version.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param logs: List of logs from feature group version.
   :type logs: list[str]


   .. py:attribute:: logs
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureImportance(client, shapFeatureImportance=None, limeFeatureImportance=None, permutationFeatureImportance=None, nullFeatureImportance=None, lofoFeatureImportance=None, ebmFeatureImportance=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Feature importance for a specified model monitor

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param shapFeatureImportance: A map of feature name to feature importance, determined by Shap values on a sample dataset.
   :type shapFeatureImportance: dict
   :param limeFeatureImportance: A map of feature name to feature importance, determined by Lime contribution values on a sample dataset.
   :type limeFeatureImportance: dict
   :param permutationFeatureImportance: A map of feature name to feature importance, determined by permutation importance.
   :type permutationFeatureImportance: dict
   :param nullFeatureImportance: A map of feature name to feature importance, determined by null feature importance.
   :type nullFeatureImportance: dict
   :param lofoFeatureImportance: A map of feature name to feature importance, determined by the Leave One Feature Out method.
   :type lofoFeatureImportance: dict
   :param ebmFeatureImportance: A map of feature name to feature importance, determined by an Explainable Boosting Machine.
   :type ebmFeatureImportance: dict


   .. py:attribute:: shap_feature_importance
      :value: None



   .. py:attribute:: lime_feature_importance
      :value: None



   .. py:attribute:: permutation_feature_importance
      :value: None



   .. py:attribute:: null_feature_importance
      :value: None



   .. py:attribute:: lofo_feature_importance
      :value: None



   .. py:attribute:: ebm_feature_importance
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeatureMapping(client, featureMapping=None, featureName=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A description of the data use for a feature

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the `Use Case Documentation <https://api.abacus.ai/app/help/useCases>`_ for more details.
   :type featureMapping: str
   :param featureName: The unique name of the feature.
   :type featureName: str


   .. py:attribute:: feature_mapping
      :value: None



   .. py:attribute:: feature_name
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FeaturePerformanceAnalysis(client, features=None, featureMetrics=None, metricsKeys=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A feature performance analysis for a model monitor

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param features: A list of the features that are being analyzed.
   :type features: list
   :param featureMetrics: A list of dictionaries, one for each feature, containing its metrics
   :type featureMetrics: list
   :param metricsKeys: A list of the keys for the metrics.
   :type metricsKeys: list


   .. py:attribute:: features
      :value: None



   .. py:attribute:: feature_metrics
      :value: None



   .. py:attribute:: metrics_keys
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FileConnector(client, bucket=None, verified=None, writePermission=None, authExpiresAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Verification result for an external storage service

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param bucket: The address of the bucket, e.g., `s3://your-bucket`
   :type bucket: str
   :param verified: `true` if the bucket has passed verification
   :type verified: bool
   :param writePermission: `true` if Abacus.AI has permission to write to this bucket
   :type writePermission: bool
   :param authExpiresAt: The time when the file connector's auth expires, if applicable
   :type authExpiresAt: str


   .. py:attribute:: bucket
      :value: None



   .. py:attribute:: verified
      :value: None



   .. py:attribute:: write_permission
      :value: None



   .. py:attribute:: auth_expires_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FileConnectorInstructions(client, verified=None, writePermission=None, authOptions=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An object with a full description of the cloud storage bucket authentication options and bucket policy. Returns an error message if the parameters are invalid.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param verified: `True` if the bucket has passed verification
   :type verified: bool
   :param writePermission: `True` if Abacus.AI has permission to write to this bucket
   :type writePermission: bool
   :param authOptions: A list of options for giving Abacus.AI access to this bucket
   :type authOptions: list[dict]


   .. py:attribute:: verified
      :value: None



   .. py:attribute:: write_permission
      :value: None



   .. py:attribute:: auth_options
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FileConnectorVerification(client, verified=None, writePermission=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The verification status of a file connector

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param verified: `true` if the bucket has passed verification
   :type verified: bool
   :param writePermission: `true` if Abacus.AI has permission to write to this bucket
   :type writePermission: bool


   .. py:attribute:: verified
      :value: None



   .. py:attribute:: write_permission
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FinetunedPretrainedModel(client, name=None, finetunedPretrainedModelId=None, finetunedPretrainedModelVersion=None, createdAt=None, updatedAt=None, config=None, baseModel=None, finetuningDatasetVersion=None, status=None, error=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A finetuned pretrained model

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The user-friendly name for the model.
   :type name: str
   :param finetunedPretrainedModelId: The unique identifier of the model.
   :type finetunedPretrainedModelId: str
   :param finetunedPretrainedModelVersion: The unique identifier of the model version.
   :type finetunedPretrainedModelVersion: str
   :param createdAt: When the finetuned pretrained model was created.
   :type createdAt: str
   :param updatedAt: When the finetuned pretrained model was last updated.
   :type updatedAt: str
   :param config: The finetuned pretrained model configuration
   :type config: dict
   :param baseModel: The pretrained base model for fine tuning
   :type baseModel: str
   :param finetuningDatasetVersion: The finetuned dataset instance id of the model.
   :type finetuningDatasetVersion: str
   :param status: The current status of the finetuned pretrained model.
   :type status: str
   :param error: Relevant error if the status is FAILED.
   :type error: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: finetuned_pretrained_model_id
      :value: None



   .. py:attribute:: finetuned_pretrained_model_version
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: config
      :value: None



   .. py:attribute:: base_model
      :value: None



   .. py:attribute:: finetuning_dataset_version
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ForecastingAnalysisGraphData(client, data=None, xAxis=None, yAxis=None, dataColumns=None, chartName=None, chartTypes=None, itemStatistics={}, chartDescriptions={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Forecasting Analysis Graph Data representation.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param data: List of graph data
   :type data: list
   :param xAxis: Feature that represents the x axis
   :type xAxis: str
   :param yAxis: Feature that represents the y axis
   :type yAxis: str
   :param dataColumns: Ordered names of the columns for each row of data
   :type dataColumns: list
   :param chartName: Name of the chart represented by the data
   :type chartName: str
   :param chartTypes: Types of charts that can exist in the current data.
   :type chartTypes: list
   :param itemStatistics: For item-wise charts, the mean, median, count, missing_percent, p10, p90, standard_deviation, min, and max
   :type itemStatistics: ItemStatistics
   :param chartDescriptions: List of descriptions of what the chart contains
   :type chartDescriptions: EdaChartDescription


   .. py:attribute:: data
      :value: None



   .. py:attribute:: x_axis
      :value: None



   .. py:attribute:: y_axis
      :value: None



   .. py:attribute:: data_columns
      :value: None



   .. py:attribute:: chart_name
      :value: None



   .. py:attribute:: chart_types
      :value: None



   .. py:attribute:: item_statistics


   .. py:attribute:: chart_descriptions


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ForecastingMonitorItemAnalysis(client, predictionItemAnalysis={}, trainingItemAnalysis={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Forecasting Monitor Item Analysis of the latest version of the data.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param predictionItemAnalysis: Data showing average, p10, p90, median sales across time for prediction data
   :type predictionItemAnalysis: ForecastingAnalysisGraphData
   :param trainingItemAnalysis: Data showing average, p10, p90, median sales across time for training data
   :type trainingItemAnalysis: ForecastingAnalysisGraphData


   .. py:attribute:: prediction_item_analysis


   .. py:attribute:: training_item_analysis


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ForecastingMonitorSummary(client, predictionTimestampCol=None, predictionTargetCol=None, trainingTimestampCol=None, trainingTargetCol=None, predictionItemId=None, trainingItemId=None, forecastFrequency=None, trainingTargetAcrossTime={}, predictionTargetAcrossTime={}, actualsHistogram={}, predictionsHistogram={}, trainHistoryData={}, predictHistoryData={}, targetDrift={}, historyDrift={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Forecasting Monitor Summary of the latest version of the data.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param predictionTimestampCol: Feature in the data that represents the timestamp column.
   :type predictionTimestampCol: str
   :param predictionTargetCol: Feature in the data that represents the target.
   :type predictionTargetCol: str
   :param trainingTimestampCol: Feature in the data that represents the timestamp column.
   :type trainingTimestampCol: str
   :param trainingTargetCol: Feature in the data that represents the target.
   :type trainingTargetCol: str
   :param predictionItemId: Feature in the data that represents the item id.
   :type predictionItemId: str
   :param trainingItemId: Feature in the data that represents the item id.
   :type trainingItemId: str
   :param forecastFrequency: Frequency of the data; can be hourly, daily, weekly, monthly, quarterly, or yearly.
   :type forecastFrequency: str
   :param trainingTargetAcrossTime: Data showing average, p10, p90, median sales across time
   :type trainingTargetAcrossTime: ForecastingAnalysisGraphData
   :param predictionTargetAcrossTime: Data showing average, p10, p90, median sales across time
   :type predictionTargetAcrossTime: ForecastingAnalysisGraphData
   :param actualsHistogram: Data showing actuals histogram
   :type actualsHistogram: ForecastingAnalysisGraphData
   :param predictionsHistogram: Data showing predictions histogram
   :type predictionsHistogram: ForecastingAnalysisGraphData
   :param trainHistoryData: Data showing length of history distribution
   :type trainHistoryData: ForecastingAnalysisGraphData
   :param predictHistoryData: Data showing length of history distribution
   :type predictHistoryData: ForecastingAnalysisGraphData
   :param targetDrift: Data showing drift of the target for all drift types: distance (KL divergence), js_distance, ws_distance, ks_statistic, psi, csi, chi_square
   :type targetDrift: FeatureDriftRecord
   :param historyDrift: Data showing drift of the history for all drift types: distance (KL divergence), js_distance, ws_distance, ks_statistic, psi, csi, chi_square
   :type historyDrift: FeatureDriftRecord


   .. py:attribute:: prediction_timestamp_col
      :value: None



   .. py:attribute:: prediction_target_col
      :value: None



   .. py:attribute:: training_timestamp_col
      :value: None



   .. py:attribute:: training_target_col
      :value: None



   .. py:attribute:: prediction_item_id
      :value: None



   .. py:attribute:: training_item_id
      :value: None



   .. py:attribute:: forecast_frequency
      :value: None



   .. py:attribute:: training_target_across_time


   .. py:attribute:: prediction_target_across_time


   .. py:attribute:: actuals_histogram


   .. py:attribute:: predictions_histogram


   .. py:attribute:: train_history_data


   .. py:attribute:: predict_history_data


   .. py:attribute:: target_drift


   .. py:attribute:: history_drift


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FsEntry(client, name=None, type=None, path=None, size=None, modified=None, isFolderEmpty=None, children=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   File system entry.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the file/folder
   :type name: str
   :param type: The type of entry (file/folder)
   :type type: str
   :param path: The path of the entry
   :type path: str
   :param size: The size of the entry in bytes
   :type size: int
   :param modified: The last modified timestamp
   :type modified: int
   :param isFolderEmpty: Whether the folder is empty (only for folders)
   :type isFolderEmpty: bool
   :param children: List of child FSEntry objects (only for folders)
   :type children: list


   .. py:attribute:: name
      :value: None



   .. py:attribute:: type
      :value: None



   .. py:attribute:: path
      :value: None



   .. py:attribute:: size
      :value: None



   .. py:attribute:: modified
      :value: None



   .. py:attribute:: isFolderEmpty
      :value: None



   .. py:attribute:: children
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: FunctionLogs(client, function=None, stats=None, stdout=None, stderr=None, algorithm=None, exception={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs from an invocation of a function.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param function: The function this is logging
   :type function: str
   :param stats: Statistics for the start and end time execution for this function
   :type stats: dict
   :param stdout: Standard out logs
   :type stdout: str
   :param stderr: Standard error logs
   :type stderr: str
   :param algorithm: Algorithm name for this function
   :type algorithm: str
   :param exception: The exception stacktrace
   :type exception: UserException


   .. py:attribute:: function
      :value: None



   .. py:attribute:: stats
      :value: None



   .. py:attribute:: stdout
      :value: None



   .. py:attribute:: stderr
      :value: None



   .. py:attribute:: algorithm
      :value: None



   .. py:attribute:: exception


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: GeneratedPitFeatureConfigOption(client, name=None, displayName=None, default=None, description=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The options to display for possible generated PIT aggregation functions

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The short name of the aggregation type.
   :type name: str
   :param displayName: The display name of the aggregation type.
   :type displayName: str
   :param default: The default value for the option.
   :type default: bool
   :param description: The description of the aggregation type.
   :type description: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: display_name
      :value: None



   .. py:attribute:: default
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: GraphDashboard(client, name=None, graphDashboardId=None, createdAt=None, projectId=None, pythonFunctionIds=None, plotReferenceIds=None, pythonFunctionNames=None, projectName=None, description=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Graph Dashboard

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The user-friendly name for the graph dashboard.
   :type name: str
   :param graphDashboardId: The unique identifier of the graph dashboard.
   :type graphDashboardId: str
   :param createdAt: Date and time at which the graph dashboard was created, in ISO-8601 format.
   :type createdAt: str
   :param projectId: The unique identifier of the project this graph dashboard belongs to.
   :type projectId: str
   :param pythonFunctionIds: List of Python function IDs included in the dashboard.
   :type pythonFunctionIds: list[str]
   :param plotReferenceIds: List of the graph reference IDs for the plots to the dashboard.
   :type plotReferenceIds: list[str]
   :param pythonFunctionNames: List of names of each of the plots to the dashboard.
   :type pythonFunctionNames: list[str]
   :param projectName: The name of the project the graph dashboard belongs to.
   :type projectName: str
   :param description: The description of the graph dashboard.
   :type description: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: graph_dashboard_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: python_function_ids
      :value: None



   .. py:attribute:: plot_reference_ids
      :value: None



   .. py:attribute:: python_function_names
      :value: None



   .. py:attribute:: project_name
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: GraphDashboard



   .. py:method:: describe()

      Describes a given graph dashboard.

      :param graph_dashboard_id: Unique identifier for the graph dashboard.
      :type graph_dashboard_id: str

      :returns: An object containing information about the graph dashboard.
      :rtype: GraphDashboard



   .. py:method:: delete()

      Deletes a graph dashboard

      :param graph_dashboard_id: Unique string identifier for the graph dashboard to be deleted.
      :type graph_dashboard_id: str



   .. py:method:: update(name = None, python_function_ids = None)

      Updates a graph dashboard

      :param name: Name of the dashboard.
      :type name: str
      :param python_function_ids: List of unique string identifiers for the Python functions to be used in the graph dashboard.
      :type python_function_ids: List

      :returns: An object describing the graph dashboard.
      :rtype: GraphDashboard
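
      A minimal sketch, assuming ``dashboard`` is a ``GraphDashboard`` instance; the
      name and Python function IDs are placeholders.

      .. code-block:: python

         # Rename the dashboard and choose which Python function plots it displays.
         dashboard = dashboard.update(
             name='Weekly KPIs',
             python_function_ids=['python_function_id_1', 'python_function_id_2'],
         )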



.. py:class:: HoldoutAnalysis(client, holdoutAnalysisId=None, name=None, featureGroupIds=None, modelId=None, modelName=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A holdout analysis object.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param holdoutAnalysisId: The unique identifier of the holdout analysis.
   :type holdoutAnalysisId: str
   :param name: The name of the holdout analysis.
   :type name: str
   :param featureGroupIds: The feature group ids associated with the holdout analysis.
   :type featureGroupIds: list[str]
   :param modelId: The model id associated with the holdout analysis.
   :type modelId: str
   :param modelName: The model name associated with the holdout analysis.
   :type modelName: str


   .. py:attribute:: holdout_analysis_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: feature_group_ids
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_name
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: rerun(model_version = None, algorithm = None)

      Rerun a holdout analysis. A different model version and algorithm can be specified which should be under the same model.

      :param model_version: (optional) Version of the model to use for the holdout analysis
      :type model_version: str
      :param algorithm: (optional) ID of algorithm to use for the holdout analysis
      :type algorithm: str

      :returns: The created holdout analysis version
      :rtype: HoldoutAnalysisVersion



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: HoldoutAnalysis



   .. py:method:: describe()

      Get a holdout analysis.

      :param holdout_analysis_id: ID of the holdout analysis to get
      :type holdout_analysis_id: str

      :returns: The holdout analysis
      :rtype: HoldoutAnalysis



   .. py:method:: list_versions()

      List holdout analysis versions for a holdout analysis.

      :param holdout_analysis_id: ID of the holdout analysis to list holdout analysis versions for
      :type holdout_analysis_id: str

      :returns: The holdout analysis versions
      :rtype: list[HoldoutAnalysisVersion]
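
      A minimal sketch, assuming ``analysis`` is a ``HoldoutAnalysis`` instance; the
      model version identifier is a placeholder.

      .. code-block:: python

         # Re-run the holdout analysis against a newer version of the same model.
         new_version = analysis.rerun(model_version='model_version_id')
         new_version.wait_for_results()

         # Enumerate all versions run for this analysis so far.
         for version in analysis.list_versions():
             print(version.holdout_analysis_version, version.get_status())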



.. py:class:: HoldoutAnalysisVersion(client, holdoutAnalysisVersion=None, holdoutAnalysisId=None, createdAt=None, status=None, error=None, modelId=None, modelVersion=None, algorithm=None, algoName=None, metrics=None, metricInfos=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A holdout analysis version object.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param holdoutAnalysisVersion: The unique identifier of the holdout analysis version.
   :type holdoutAnalysisVersion: str
   :param holdoutAnalysisId: The unique identifier of the holdout analysis.
   :type holdoutAnalysisId: str
   :param createdAt: The timestamp at which the holdout analysis version was created.
   :type createdAt: str
   :param status: The status of the holdout analysis version.
   :type status: str
   :param error: The error message if the status is FAILED.
   :type error: str
   :param modelId: The model id associated with the holdout analysis.
   :type modelId: str
   :param modelVersion: The model version associated with the holdout analysis.
   :type modelVersion: str
   :param algorithm: The algorithm used to train the model.
   :type algorithm: str
   :param algoName: The name of the algorithm used to train the model.
   :type algoName: str
   :param metrics: The metrics of the holdout analysis version.
   :type metrics: dict
   :param metricInfos: The metric infos of the holdout analysis version.
   :type metricInfos: dict


   .. py:attribute:: holdout_analysis_version
      :value: None



   .. py:attribute:: holdout_analysis_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: algorithm
      :value: None



   .. py:attribute:: algo_name
      :value: None



   .. py:attribute:: metrics
      :value: None



   .. py:attribute:: metric_infos
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: HoldoutAnalysisVersion



   .. py:method:: describe(get_metrics = False)

      Get a holdout analysis version.

      :param get_metrics: (optional) Whether to get the metrics for the holdout analysis version
      :type get_metrics: bool

      :returns: The holdout analysis version
      :rtype: HoldoutAnalysisVersion



   .. py:method:: wait_for_results(timeout=3600)

      A waiting call until the holdout analysis for the version is complete.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int
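
      A hedged sketch of waiting for the analysis to finish and then pulling its metrics, assuming `version` is an existing `HoldoutAnalysisVersion` instance:

      .. code-block:: python

         # Assumes `version` is an existing HoldoutAnalysisVersion object.
         version.wait_for_results(timeout=3600)
         print(version.get_status())  # e.g. 'COMPLETE' once finished
         version = version.describe(get_metrics=True)
         print(version.metrics)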



   .. py:method:: get_status()

      Gets the status of the holdout analysis version.

      :returns: A string describing the status of a holdout analysis version (pending, complete, etc.).
      :rtype: str



.. py:class:: HostedApp(client, hostedAppId=None, deploymentConversationId=None, name=None, createdAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Hosted App

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param hostedAppId: The ID of the hosted app
   :type hostedAppId: id
   :param deploymentConversationId: The ID of the deployment conversation
   :type deploymentConversationId: id
   :param name: The name of the hosted app
   :type name: str
   :param createdAt: The creation timestamp
   :type createdAt: str


   .. py:attribute:: hosted_app_id
      :value: None



   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: HostedAppContainer(client, hostedAppContainerId=None, hostedAppId=None, deploymentConversationId=None, hostedAppVersion=None, name=None, createdAt=None, updatedAt=None, containerImage=None, route=None, appConfig=None, isDev=None, lifecycle=None, status=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Hosted app container information.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param hostedAppContainerId: The ID of the hosted app container
   :type hostedAppContainerId: id
   :param hostedAppId: The ID of the hosted app
   :type hostedAppId: id
   :param deploymentConversationId: The deployment conversation ID
   :type deploymentConversationId: id
   :param hostedAppVersion: The instance of the hosted app
   :type hostedAppVersion: id
   :param name: The name of the hosted app
   :type name: str
   :param createdAt: Creation timestamp
   :type createdAt: str
   :param updatedAt: Last update timestamp
   :type updatedAt: str
   :param containerImage: Container image name
   :type containerImage: str
   :param route: Container route
   :type route: str
   :param appConfig: App configuration
   :type appConfig: dict
   :param isDev: Whether this is a dev container
   :type isDev: bool
   :param lifecycle: Container lifecycle status (PENDING/DEPLOYING/ACTIVE/FAILED/STOPPED/DELETING)
   :type lifecycle: str
   :param status: Container status (RUNNING/STOPPED/DEPLOYING/FAILED)
   :type status: str


   .. py:attribute:: hosted_app_container_id
      :value: None



   .. py:attribute:: hosted_app_id
      :value: None



   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: hosted_app_version
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: container_image
      :value: None



   .. py:attribute:: route
      :value: None



   .. py:attribute:: app_config
      :value: None



   .. py:attribute:: is_dev
      :value: None



   .. py:attribute:: lifecycle
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: HostedAppFileRead(client, content=None, start=None, end=None, retcode=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Result of reading file content from a hosted app container.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param content: The contents of the file or a portion of it.
   :type content: str
   :param start: If present, the starting position of the read.
   :type start: int
   :param end: If present, the last position in the file returned in this read.
   :type end: int
   :param retcode: If the read is associated with a log, the return code of the command.
   :type retcode: int


   .. py:attribute:: content
      :value: None



   .. py:attribute:: start
      :value: None



   .. py:attribute:: end
      :value: None



   .. py:attribute:: retcode
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: HostedModelToken(client, createdAt=None, tag=None, trailingAuthToken=None, hostedModelTokenId=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A hosted model authentication token that is used to authenticate requests to an Abacus.AI hosted model.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param createdAt: When the token was created
   :type createdAt: str
   :param tag: A user-friendly tag for the API key.
   :type tag: str
   :param trailingAuthToken: The last four characters of the un-encrypted auth token
   :type trailingAuthToken: str
   :param hostedModelTokenId: The unique identifier attached to this authentication token
   :type hostedModelTokenId: str


   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: tag
      :value: None



   .. py:attribute:: trailing_auth_token
      :value: None



   .. py:attribute:: hosted_model_token_id
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: HumeVoice(client, name=None, gender=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Hume Voice

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the voice.
   :type name: str
   :param gender: The gender of the voice.
   :type gender: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: gender
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ImageGenSettings(client, model=None, settings=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Image generation settings

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param model: Dropdown for models available for image generation.
   :type model: dict
   :param settings: The settings for each model.
   :type settings: dict


   .. py:attribute:: model
      :value: None



   .. py:attribute:: settings
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: IndexingConfig(client, primaryKey=None, updateTimestampKey=None, lookupKeys=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The indexing config for a Feature Group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param primaryKey: A single key index
   :type primaryKey: str
   :param updateTimestampKey: The primary timestamp feature
   :type updateTimestampKey: str
   :param lookupKeys: A multi-key index. Cannot be used in conjunction with primary key.
   :type lookupKeys: list[str]


   .. py:attribute:: primary_key
      :value: None



   .. py:attribute:: update_timestamp_key
      :value: None



   .. py:attribute:: lookup_keys
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: InferredDatabaseColumnToFeatureMappings(client, databaseColumnToFeatureMappings={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Autocomplete mappings from database connector columns to features

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param databaseColumnToFeatureMappings: Mappings from database columns to features
   :type databaseColumnToFeatureMappings: DatabaseColumnFeatureMapping


   .. py:attribute:: database_column_to_feature_mappings


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: InferredFeatureMappings(client, error=None, featureMappings={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A description of the data use for a feature

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param error: Error message if there was an error inferring the feature mappings
   :type error: str
   :param featureMappings: The inferred feature mappings
   :type featureMappings: FeatureMapping


   .. py:attribute:: error
      :value: None



   .. py:attribute:: feature_mappings


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ItemStatistics(client, missingPercent=None, count=None, median=None, mean=None, p10=None, p90=None, stddev=None, min=None, max=None, lowerBound=None, upperBound=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   ItemStatistics representation.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param missingPercent: percentage of missing values in data
   :type missingPercent: float
   :param count: count of data
   :type count: int
   :param median: median of the data
   :type median: float
   :param mean: mean value of the data
   :type mean: float
   :param p10: 10th percentile of the data
   :type p10: float
   :param p90: 90th percentile of the data
   :type p90: float
   :param stddev: standard deviation of the data
   :type stddev: float
   :param min: min value in the data
   :type min: int
   :param max: max value in the data
   :type max: int
   :param lowerBound: lower bound threshold of the data
   :type lowerBound: float
   :param upperBound: upper bound threshold of the data
   :type upperBound: float


   .. py:attribute:: missing_percent
      :value: None



   .. py:attribute:: count
      :value: None



   .. py:attribute:: median
      :value: None



   .. py:attribute:: mean
      :value: None



   .. py:attribute:: p10
      :value: None



   .. py:attribute:: p90
      :value: None



   .. py:attribute:: stddev
      :value: None



   .. py:attribute:: min
      :value: None



   .. py:attribute:: max
      :value: None



   .. py:attribute:: lower_bound
      :value: None



   .. py:attribute:: upper_bound
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmApp(client, llmAppId=None, name=None, description=None, projectId=None, deploymentId=None, createdAt=None, updatedAt=None, status=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An LLM App that can be used for generation. LLM Apps are specifically crafted to help with certain tasks like code generation or question answering.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param llmAppId: The unique identifier of the LLM App.
   :type llmAppId: str
   :param name: The name of the LLM App.
   :type name: str
   :param description: The description of the LLM App.
   :type description: str
   :param projectId: The project ID of the deployment associated with the LLM App.
   :type projectId: str
   :param deploymentId: The deployment ID associated with the LLM App.
   :type deploymentId: str
   :param createdAt: The timestamp at which the LLM App was created.
   :type createdAt: str
   :param updatedAt: The timestamp at which the LLM App was updated.
   :type updatedAt: str
   :param status: The status of the LLM App's deployment.
   :type status: str


   .. py:attribute:: llm_app_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmCodeBlock(client, language=None, code=None, start=None, end=None, valid=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Parsed code block from an LLM response

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param language: The language of the code block, e.g. python, sql, etc.
   :type language: str
   :param code: source code string
   :type code: str
   :param start: index of the starting character of the code block in the original response
   :type start: int
   :param end: index of the last character of the code block in the original response
   :type end: int
   :param valid: flag denoting whether the source code string is syntactically valid
   :type valid: bool


   .. py:attribute:: language
      :value: None



   .. py:attribute:: code
      :value: None



   .. py:attribute:: start
      :value: None



   .. py:attribute:: end
      :value: None



   .. py:attribute:: valid
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmExecutionPreview(client, error=None, sql=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Preview of executing queries using LLM.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param error: The error message if the preview failed.
   :type error: str
   :param sql: Preview of SQL query generated by LLM.
   :type sql: str


   .. py:attribute:: error
      :value: None



   .. py:attribute:: sql
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmExecutionResult(client, status=None, error=None, execution={}, preview={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Results of executing queries using LLM.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param status: The status of the execution.
   :type status: str
   :param error: The error message if the execution failed.
   :type error: str
   :param execution: Information on execution of the query.
   :type execution: ExecuteFeatureGroupOperation
   :param preview: Preview of executing queries using LLM.
   :type preview: LlmExecutionPreview
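
   A hedged sketch of checking an execution result, assuming `result` is an existing `LlmExecutionResult` instance obtained from a client call not shown here:

   .. code-block:: python

      # Assumes `result` is an existing LlmExecutionResult object.
      if result.error:
          print('Execution failed:', result.error)
      else:
          print(result.status)
          print(result.preview.sql)  # preview of the generated SQL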


   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: execution


   .. py:attribute:: preview


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmGeneratedCode(client, sql=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Code generated by LLM.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param sql: SQL query generated by LLM.
   :type sql: str


   .. py:attribute:: sql
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmInput(client, content=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The result of encoding an object as input for a language model.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param content: Content of the response
   :type content: str


   .. py:attribute:: content
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmParameters(client, parameters=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The parameters of LLM for given inputs.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param parameters: The parameters of LLM for given inputs.
   :type parameters: dict


   .. py:attribute:: parameters
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: LlmResponse(client, content=None, tokens=None, stopReason=None, llmName=None, inputTokens=None, outputTokens=None, totalTokens=None, codeBlocks={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The response returned by LLM

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param content: Full response from LLM.
   :type content: str
   :param tokens: The number of tokens in the response.
   :type tokens: int
   :param stopReason: The reason due to which the response generation stopped.
   :type stopReason: str
   :param llmName: The name of the LLM model used to generate the response.
   :type llmName: str
   :param inputTokens: The number of input tokens used in the LLM call.
   :type inputTokens: int
   :param outputTokens: The number of output tokens generated in the LLM response.
   :type outputTokens: int
   :param totalTokens: The total number of tokens (input + output) used in the LLM interaction.
   :type totalTokens: int
   :param codeBlocks: A list of parsed code blocks from raw LLM Response
   :type codeBlocks: LlmCodeBlock
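
   For illustration, a hedged sketch of inspecting the parsed code blocks, assuming `response` is an `LlmResponse` instance returned by a client call not shown here:

   .. code-block:: python

      # Assumes `response` is an existing LlmResponse object.
      print(response.llm_name, response.total_tokens)
      for block in response.code_blocks:
          if block.valid and block.language == 'python':
              print(block.code)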


   .. py:attribute:: content
      :value: None



   .. py:attribute:: tokens
      :value: None



   .. py:attribute:: stop_reason
      :value: None



   .. py:attribute:: llm_name
      :value: None



   .. py:attribute:: input_tokens
      :value: None



   .. py:attribute:: output_tokens
      :value: None



   .. py:attribute:: total_tokens
      :value: None



   .. py:attribute:: code_blocks


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: MemoryOptions(client, cpu={}, gpu={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The overall memory options for executing a job

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param cpu: Contains information about the default CPU and list of CPU memory & size options
   :type cpu: CpuGpuMemorySpecs
   :param gpu: Contains information about the default GPU and list of GPU memory & size options
   :type gpu: CpuGpuMemorySpecs


   .. py:attribute:: cpu


   .. py:attribute:: gpu


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: MessagingConnectorResponse(client, welcomeMessage=None, defaultMessage=None, disclaimer=None, messagingBotName=None, useDefaultLabel=None, initAckReq=None, defaultLabels=None, enabledExternalLinks=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The response to view label data for Teams

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param welcomeMessage: The message shown to the user on the first installation of the app.
   :type welcomeMessage: str
   :param defaultMessage: The message returned when the user sends greetings such as "hi", "hello", or "help".
   :type defaultMessage: str
   :param disclaimer: A disclaimer included with every bot response.
   :type disclaimer: str
   :param messagingBotName: The name displayed in various places instead of Abacus.AI.
   :type messagingBotName: str
   :param useDefaultLabel: Whether to use the default Abacus.AI label (when set to true).
   :type useDefaultLabel: bool
   :param initAckReq: Set to true if an initial acknowledgment of the query is required by the user.
   :type initAckReq: bool
   :param defaultLabels: Dictionary of default labels used when user-specified labels aren't set.
   :type defaultLabels: dict
   :param enabledExternalLinks: List of external applications for which external links are applicable.
   :type enabledExternalLinks: list


   .. py:attribute:: welcome_message
      :value: None



   .. py:attribute:: default_message
      :value: None



   .. py:attribute:: disclaimer
      :value: None



   .. py:attribute:: messaging_bot_name
      :value: None



   .. py:attribute:: use_default_label
      :value: None



   .. py:attribute:: init_ack_req
      :value: None



   .. py:attribute:: default_labels
      :value: None



   .. py:attribute:: enabled_external_links
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Model(client, name=None, modelId=None, modelConfigType=None, modelPredictionConfig=None, createdAt=None, projectId=None, shared=None, sharedAt=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, trainingInputTables=None, sourceCode=None, cpuSize=None, memory=None, trainingFeatureGroupIds=None, algorithmModelConfigs=None, trainingVectorStoreVersions=None, documentRetrievers=None, documentRetrieverIds=None, isPythonModel=None, defaultAlgorithm=None, customAlgorithmConfigs=None, restrictedAlgorithms=None, useGpu=None, notebookId=None, trainingRequired=None, location={}, refreshSchedules={}, codeSource={}, databaseConnector={}, dataLlmFeatureGroups={}, latestModelVersion={}, modelConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A model

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The user-friendly name for the model.
   :type name: str
   :param modelId: The unique identifier of the model.
   :type modelId: str
   :param modelConfigType: Name of the TrainingConfig class of the model_config.
   :type modelConfigType: str
   :param modelPredictionConfig: The prediction config options for the model.
   :type modelPredictionConfig: dict
   :param createdAt: Date and time at which the model was created.
   :type createdAt: str
   :param projectId: The project this model belongs to.
   :type projectId: str
   :param shared: If model is shared to the Abacus.AI model showcase.
   :type shared: bool
   :param sharedAt: The date and time at which the model was shared to the model showcase
   :type sharedAt: str
   :param trainFunctionName: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
   :type trainFunctionName: str
   :param predictFunctionName: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
   :type predictFunctionName: str
   :param predictManyFunctionName: Name of the function found in the source code that will be executed to run batch predictions through the model.
   :type predictManyFunctionName: str
   :param initializeFunctionName: Name of the function found in the source code to initialize the trained model before using it to make predictions.
   :type initializeFunctionName: str
   :param trainingInputTables: List of feature groups that are supplied to the train function as parameters. Each of the parameters is a materialized DataFrame (the same type as the function's return value).
   :type trainingInputTables: list
   :param sourceCode: Python code used to make the model.
   :type sourceCode: str
   :param cpuSize: Cpu size specified for the python model training.
   :type cpuSize: str
   :param memory: Memory in GB specified for the python model training.
   :type memory: int
   :param trainingFeatureGroupIds: The unique identifiers of the feature groups used as the inputs to train this model on.
   :type trainingFeatureGroupIds: list of unique string identifiers
   :param algorithmModelConfigs: List of algorithm-specific training configs.
   :type algorithmModelConfigs: list[dict]
   :param trainingVectorStoreVersions: The vector store version IDs used as inputs during training to create this ModelVersion.
   :type trainingVectorStoreVersions: list
   :param documentRetrievers: List of document retrievers used to create this model.
   :type documentRetrievers: list
   :param documentRetrieverIds: List of document retriever IDs used to create this model.
   :type documentRetrieverIds: list
   :param isPythonModel: If this model is handled as a Python model
   :type isPythonModel: bool
   :param defaultAlgorithm: If set, this algorithm will always be used when deploying the model regardless of the model metrics
   :type defaultAlgorithm: str
   :param customAlgorithmConfigs: User-defined configs for each of the user-defined custom algorithms
   :type customAlgorithmConfigs: dict
   :param restrictedAlgorithms: User-selected algorithms to train.
   :type restrictedAlgorithms: dict
   :param useGpu: If this model uses gpu.
   :type useGpu: bool
   :param notebookId: The notebook associated with this model.
   :type notebookId: str
   :param trainingRequired: If training is required to keep the model up-to-date.
   :type trainingRequired: bool
   :param latestModelVersion: The latest model version.
   :type latestModelVersion: ModelVersion
   :param location: Location information for models that are imported.
   :type location: ModelLocation
   :param refreshSchedules: List of refresh schedules that indicate when the next model version will be trained
   :type refreshSchedules: RefreshSchedule
   :param codeSource: If a python model, information on the source code
   :type codeSource: CodeSource
   :param databaseConnector: Database connector used by the model.
   :type databaseConnector: DatabaseConnector
   :param dataLlmFeatureGroups: List of feature groups used by the model for queries
   :type dataLlmFeatureGroups: FeatureGroup
   :param modelConfig: The training config options used to train this model.
   :type modelConfig: TrainingConfig


   .. py:attribute:: name
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_config_type
      :value: None



   .. py:attribute:: model_prediction_config
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: shared
      :value: None



   .. py:attribute:: shared_at
      :value: None



   .. py:attribute:: train_function_name
      :value: None



   .. py:attribute:: predict_function_name
      :value: None



   .. py:attribute:: predict_many_function_name
      :value: None



   .. py:attribute:: initialize_function_name
      :value: None



   .. py:attribute:: training_input_tables
      :value: None



   .. py:attribute:: source_code
      :value: None



   .. py:attribute:: cpu_size
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: training_feature_group_ids
      :value: None



   .. py:attribute:: algorithm_model_configs
      :value: None



   .. py:attribute:: training_vector_store_versions
      :value: None



   .. py:attribute:: document_retrievers
      :value: None



   .. py:attribute:: document_retriever_ids
      :value: None



   .. py:attribute:: is_python_model
      :value: None



   .. py:attribute:: default_algorithm
      :value: None



   .. py:attribute:: custom_algorithm_configs
      :value: None



   .. py:attribute:: restricted_algorithms
      :value: None



   .. py:attribute:: use_gpu
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: training_required
      :value: None



   .. py:attribute:: location


   .. py:attribute:: refresh_schedules


   .. py:attribute:: code_source


   .. py:attribute:: database_connector


   .. py:attribute:: data_llm_feature_groups


   .. py:attribute:: latest_model_version


   .. py:attribute:: model_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: describe_train_test_data_split_feature_group()

      Get the train and test data split for a trained model by its unique identifier. This is only supported for models with custom algorithms.

      :param model_id: The unique ID of the model. By default, the latest model version will be returned if no version is specified.
      :type model_id: str

      :returns: The feature group containing the training data and fold information.
      :rtype: FeatureGroup



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Model



   .. py:method:: describe()

      Retrieves a full description of the specified model.

      :param model_id: Unique string identifier associated with the model.
      :type model_id: str

      :returns: Description of the model.
      :rtype: Model



   .. py:method:: rename(name)

      Renames a model

      :param name: The new name to assign to the model.
      :type name: str



   .. py:method:: update_python(function_source_code = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None, is_thread_safe = None, training_config = None)

      Updates an existing Python Model using user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the `train` and `predict` functions with the materialized feature groups for those input feature groups.

      This method expects `functionSourceCode` to be a valid Python source file which contains the functions named `trainFunctionName` and `predictFunctionName`. The function named by `trainFunctionName` returns the ModelVersion that is the result of training the model, and the function named by `predictFunctionName` has no well-defined return type, as it returns the prediction, which can be anything.


      :param function_source_code: Contents of a valid Python source code file. The source code should contain the functions named `trainFunctionName` and `predictFunctionName`. A list of allowed import and system libraries for each language is specified in the user functions documentation section.
      :type function_source_code: str
      :param train_function_name: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the source code that will be executed to run batch predictions through the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param initialize_function_name: Name of the function found in the source code to initialize the trained model before using it to make predictions using the model.
      :type initialize_function_name: str
      :param training_input_tables: List of feature groups that are supplied to the `train` function as parameters. Each of the parameters are materialized DataFrames (same type as the functions return value).
      :type training_input_tables: list
      :param cpu_size: Size of the CPU for the model training function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function.
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: `['numpy==1.2.3', 'pandas>=1.4.0']`.
      :type package_requirements: list
      :param use_gpu: Whether this model needs gpu
      :type use_gpu: bool
      :param is_thread_safe: Whether this model is thread safe
      :type is_thread_safe: bool
      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig

      :returns: The updated model.
      :rtype: Model
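
      A minimal, hedged sketch of updating a Python model in place, assuming `model` is an existing `Model` instance; the source string, function names, and table name below are hypothetical placeholders:

      .. code-block:: python

         # Hypothetical source; real code must define the named functions.
         source = (
             "def train(training_df):\n"
             "    return {'mean': training_df['target'].mean()}\n"
             "\n"
             "def predict(model, query):\n"
             "    return model['mean']\n"
         )
         model = model.update_python(
             function_source_code=source,
             train_function_name='train',
             predict_function_name='predict',
             training_input_tables=['example_training_table'],
             memory=16,
         )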



   .. py:method:: update_python_zip(train_function_name = None, predict_function_name = None, predict_many_function_name = None, train_module_name = None, predict_module_name = None, training_input_tables = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None)

      Updates an existing Python Model using a provided zip file. If a list of input feature groups is supplied, they will be provided as arguments to the train and predict functions with the materialized feature groups for those input feature groups.

      This method expects `trainModuleName` and `predictModuleName` to be valid Python source files which contain the functions named `trainFunctionName` and `predictFunctionName`, respectively. The function named by `trainFunctionName` returns the ModelVersion that is the result of training the model, and the function named by `predictFunctionName` has no well-defined return type, as it returns the prediction, which can be anything.


      :param train_function_name: Name of the function found in the train module that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the predict module that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the predict module that will be executed to run batch predictions through the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param train_module_name: Full path of the module that contains the train function from the root of the zip.
      :type train_module_name: str
      :param predict_module_name: Full path of the module that contains the predict function from the root of the zip.
      :type predict_module_name: str
      :param training_input_tables: List of feature groups that are supplied to the train function as parameters. Each of the parameters is a materialized DataFrame (the same type as the function's return value).
      :type training_input_tables: list
      :param cpu_size: Size of the CPU for the model training function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function.
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param use_gpu: Whether this model needs gpu
      :type use_gpu: bool

      :returns: The updated model.
      :rtype: Upload



   .. py:method:: update_python_git(application_connector_id = None, branch_name = None, python_root = None, train_function_name = None, predict_function_name = None, predict_many_function_name = None, train_module_name = None, predict_module_name = None, training_input_tables = None, cpu_size = None, memory = None, use_gpu = None)

      Updates an existing Python model using an existing Git application connector. If a list of input feature groups is supplied, these will be provided as arguments to the train and predict functions with the materialized feature groups for those input feature groups.

      This method expects `trainModuleName` and `predictModuleName` to be valid Python source files which contain the functions named `trainFunctionName` and `predictFunctionName`, respectively. The function named by `trainFunctionName` returns the `ModelVersion` that is the result of training the model, and the function named by `predictFunctionName` has no well-defined return type, as it returns the prediction, which can be anything.


      :param application_connector_id: The unique ID associated with the Git application connector.
      :type application_connector_id: str
      :param branch_name: Name of the branch in the Git repository to be used for training.
      :type branch_name: str
      :param python_root: Path from the top level of the Git repository to the directory containing the Python source code. If not provided, the default is the root of the Git repository.
      :type python_root: str
      :param train_function_name: Name of the function found in the train module that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param predict_function_name: Name of the function found in the predict module that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the predict module that will be executed to run batch predictions through the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param train_module_name: Full path of the module that contains the train function from the root of the zip.
      :type train_module_name: str
      :param predict_module_name: Full path of the module that contains the predict function from the root of the zip.
      :type predict_module_name: str
      :param training_input_tables: List of feature groups that are supplied to the train function as parameters. Each of the parameters is a materialized DataFrame (the same type as the function's return value).
      :type training_input_tables: list
      :param cpu_size: Size of the CPU for the model training function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function.
      :type memory: int
      :param use_gpu: Whether this model needs gpu
      :type use_gpu: bool

      :returns: The updated model.
      :rtype: Model



   .. py:method:: set_training_config(training_config, feature_group_ids = None)

      Edits the default model training config

      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig
      :param feature_group_ids: The list of feature groups used as input to the model.
      :type feature_group_ids: List

      :returns: The model object corresponding to the updated training config.
      :rtype: Model



   .. py:method:: set_prediction_params(prediction_config)

      Sets the model prediction config for the model

      :param prediction_config: Prediction configuration for the model.
      :type prediction_config: dict

      :returns: Model object after the prediction configuration is applied.
      :rtype: Model



   .. py:method:: get_metrics(model_version = None, return_graphs = False, validation = False)

      Retrieves metrics for all the algorithms trained in this model version.

      If only the model's unique identifier (model_id) is specified, the latest trained version of the model (model_version) is used.


      :param model_version: Version of the model.
      :type model_version: str
      :param return_graphs: If true, will return the information used for the graphs on the model metrics page such as PR Curve per label.
      :type return_graphs: bool
      :param validation: If true, will return the validation metrics instead of the test metrics.
      :type validation: bool

      :returns: An object containing the model metrics and explanations for what each metric means.
      :rtype: ModelMetrics



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      Retrieves a list of versions for a given model.

      :param limit: Maximum length of the list of all dataset versions.
      :type limit: int
      :param start_after_version: Unique string identifier of the version after which the list starts.
      :type start_after_version: str

      :returns: An array of model versions.
      :rtype: list[ModelVersion]
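
      A hedged sketch combining this call with `get_metrics` above, assuming `model` is an existing `Model` instance:

      .. code-block:: python

         # Assumes `model` is an existing Model object.
         for version in model.list_versions(limit=5):
             print(version)
         metrics = model.get_metrics()
         print(metrics.target_column, metrics.metric_names)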



   .. py:method:: retrain(deployment_ids = None, feature_group_ids = None, custom_algorithms = None, builtin_algorithms = None, custom_algorithm_configs = None, cpu_size = None, memory = None, training_config = None, algorithm_training_configs = None)

      Retrains the specified model, with an option to choose the deployments to which the retraining will be deployed.

      :param deployment_ids: List of unique string identifiers of deployments to automatically deploy to.
      :type deployment_ids: List
      :param feature_group_ids: List of feature group IDs provided by the user to train the model on.
      :type feature_group_ids: List
      :param custom_algorithms: List of user-defined algorithms to train. If not set, will honor the runs from the last time and applicable new custom algorithms.
      :type custom_algorithms: list
      :param builtin_algorithms: List of algorithm names or algorithm IDs of Abacus.AI built-in algorithms to train. If not set, will honor the runs from the last time and applicable new built-in algorithms.
      :type builtin_algorithms: list
      :param custom_algorithm_configs: User-defined training configs for each custom algorithm.
      :type custom_algorithm_configs: dict
      :param cpu_size: Size of the CPU for the user-defined algorithms during training.
      :type cpu_size: str
      :param memory: Memory (in GB) for the user-defined algorithms during training.
      :type memory: int
      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig
      :param algorithm_training_configs: List of algorithm-specific training configs that will be part of the model training AutoML run.
      :type algorithm_training_configs: list

      :returns: The model that is being retrained.
      :rtype: Model
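
      A hedged sketch of kicking off a retrain and waiting for it to finish, assuming `model` is an existing `Model` instance; the deployment ID is a hypothetical placeholder:

      .. code-block:: python

         # Assumes `model` is an existing Model object; the deployment ID
         # below is a hypothetical placeholder.
         model = model.retrain(deployment_ids=['example_deployment_id'])
         model.wait_for_training()
         print(model.get_status())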



   .. py:method:: delete()

      Deletes the specified model and all its versions. Models which are currently used in deployments cannot be deleted.

      :param model_id: Unique string identifier of the model to delete.
      :type model_id: str



   .. py:method:: set_default_algorithm(algorithm = None, data_cluster_type = None)

      Sets the model's algorithm to default for all new deployments

      :param algorithm: Algorithm to pin in the model.
      :type algorithm: str
      :param data_cluster_type: Data cluster type to set the lead model for.
      :type data_cluster_type: str



   .. py:method:: list_artifacts_exports(limit = 25)

      List all the model artifacts exports.

      :param limit: Maximum length of the list of all exports.
      :type limit: int

      :returns: List of model artifacts exports.
      :rtype: list[ModelArtifactsExport]



   .. py:method:: get_training_types_for_deployment(model_version = None, algorithm = None)

      Returns types of models that can be deployed for a given model instance ID.

      :param model_version: The unique ID associated with the model version to deploy.
      :type model_version: str
      :param algorithm: The unique ID associated with the algorithm to deploy.
      :type algorithm: str

      :returns: Model training types for deployment.
      :rtype: ModelTrainingTypeForDeployment



   .. py:method:: update_agent(function_source_code = None, agent_function_name = None, memory = None, package_requirements = None, description = None, enable_binary_input = None, agent_input_schema = None, agent_output_schema = None, workflow_graph = None, agent_interface = None, included_modules = None, org_level_connectors = None, user_level_connectors = None, initialize_function_name = None, initialize_function_code = None)

      Updates an existing AI Agent. A new version of the agent will be created and published.

      :param memory: Memory (in GB) for the agent.
      :type memory: int
      :param package_requirements: A list of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param description: A description of the agent, including its purpose and instructions.
      :type description: str
      :param workflow_graph: The workflow graph for the agent.
      :type workflow_graph: WorkflowGraph
      :param agent_interface: The interface that the agent will be deployed with.
      :type agent_interface: AgentInterface
      :param included_modules: A list of user created custom modules to include in the agent's environment.
      :type included_modules: List
      :param org_level_connectors: A list of org level connector ids to be used by the agent.
      :type org_level_connectors: List
      :param user_level_connectors: A dictionary mapping ApplicationConnectorType keys to lists of OAuth scopes. Each key represents a specific user level application connector, while the value is a list of scopes that define the permissions granted to the application.
      :type user_level_connectors: Dict
      :param initialize_function_name: The name of the function to be used for initialization.
      :type initialize_function_name: str
      :param initialize_function_code: The function code to be used for initialization.
      :type initialize_function_code: str

      :returns: The updated agent.
      :rtype: Agent



   .. py:method:: wait_for_training(timeout=None)

      A waiting call until the model is trained.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: wait_for_evaluation(timeout=None)

      A waiting call until the model is completely evaluated.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: wait_for_publish(timeout=None)

      A waiting call until the agent is published.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: wait_for_full_automl(timeout=None)

      A waiting call until the full AutoML cycle is completed.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status(get_automl_status = False)

      Gets the status of the model training.

      :returns: A string describing the status of a model training (pending, complete, etc.).
      :rtype: str



   .. py:method:: create_refresh_policy(cron)

      Creates a refresh policy for the model.

      :param cron: A cron style string to set the refresh time.
      :type cron: str

      :returns: The refresh policy object.
      :rtype: RefreshPolicy
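
      A small hedged sketch, assuming `model` is an existing `Model` instance; the cron expression is only an example schedule:

      .. code-block:: python

         # Retrain weekly at midnight UTC on Sundays (example cron only).
         policy = model.create_refresh_policy('0 0 * * 0')
         for p in model.list_refresh_policies():
             print(p)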



   .. py:method:: list_refresh_policies()

      Gets the refresh policies in a list.

      :returns: A list of refresh policy objects.
      :rtype: List[RefreshPolicy]



   .. py:method:: get_train_test_feature_group_as_pandas()

      Get the model's train-test data split feature group as a pandas DataFrame.

      :returns: A pandas DataFrame containing the training data with the fold column.
      :rtype: pandas.DataFrame



.. py:class:: ModelArtifactsExport(client, modelArtifactsExportId=None, modelVersion=None, outputLocation=None, status=None, createdAt=None, exportCompletedAt=None, error=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Model Artifacts Export Job

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modelArtifactsExportId: Unique identifier for this export.
   :type modelArtifactsExportId: str
   :param modelVersion: Version of the model being exported.
   :type modelVersion: str
   :param outputLocation: File Connector location the feature group is being written to.
   :type outputLocation: str
   :param status: Current status of the export.
   :type status: str
   :param createdAt: Timestamp at which the export was created (ISO-8601 format).
   :type createdAt: str
   :param exportCompletedAt: Timestamp at which the export completed (ISO-8601 format).
   :type exportCompletedAt: str
   :param error: If `status` is `FAILED`, this field will be populated with an error.
   :type error: str


   .. py:attribute:: model_artifacts_export_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: output_location
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: export_completed_at
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: ModelArtifactsExport



   .. py:method:: describe()

      Get the description and status of the model artifacts export.

      :param model_artifacts_export_id: A unique string identifier for the export.
      :type model_artifacts_export_id: str

      :returns: Object describing the export and its status.
      :rtype: ModelArtifactsExport



   .. py:method:: wait_for_results(timeout=3600)

      A waiting call until the model artifacts export is created.

      :param timeout: The maximum time to wait for the call to finish; if it does not finish within the allocated time, the call is considered timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the model artifacts export.

      :returns: A string describing the status of a model artifacts export (pending, complete, etc.).
      :rtype: str
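
      A hedged sketch of monitoring an export, assuming `export` is an existing `ModelArtifactsExport` instance (for example, one of the entries returned by `Model.list_artifacts_exports` documented above):

      .. code-block:: python

         # Assumes `export` is an existing ModelArtifactsExport object.
         export.wait_for_results(timeout=3600)
         export = export.refresh()
         print(export.get_status(), export.output_location)
         if export.error:
             print('Export failed:', export.error)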



.. py:class:: ModelBlueprintExport(client, modelVersion=None, currentTrainingConfig=None, modelBlueprintStages={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Model Blueprint

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modelVersion: Version of the model that the blueprint is for.
   :type modelVersion: str
   :param currentTrainingConfig: The current training configuration for the model. It can be used to get training configs and train a new model
   :type currentTrainingConfig: dict
   :param modelBlueprintStages: The stages of the model blueprint. Each one includes the stage name, display name, description, parameters, and predecessors.
   :type modelBlueprintStages: ModelBlueprintStage


   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: current_training_config
      :value: None



   .. py:attribute:: model_blueprint_stages


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelBlueprintStage(client, stageName=None, displayName=None, description=None, params=None, predecessors=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A stage in the model blueprint export process.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param stageName: The name of the stage.
   :type stageName: str
   :param displayName: The display name of the stage.
   :type displayName: str
   :param description: The description of the stage.
   :type description: str
   :param params: The parameters for the stage.
   :type params: dict
   :param predecessors: A list of stages that occur directly before this stage.
   :type predecessors: list


   .. py:attribute:: stage_name
      :value: None



   .. py:attribute:: display_name
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: params
      :value: None



   .. py:attribute:: predecessors
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelLocation(client, location=None, artifactNames=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Provide location information for the plug-and-play model.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param location: Location of the plug-and-play model.
   :type location: str
   :param artifactNames: Representations of the names of the artifacts used to create the model.
   :type artifactNames: dict


   .. py:attribute:: location
      :value: None



   .. py:attribute:: artifact_names
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelMetrics(client, algoMetrics=None, selectedAlgorithm=None, selectedAlgorithmName=None, modelId=None, modelVersion=None, metricNames=None, targetColumn=None, trainValTestSplit=None, trainingCompletedAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Metrics of the trained model.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param algoMetrics: Dictionary mapping algorithm ID to algorithm name and algorithm metrics dictionary
   :type algoMetrics: dict
   :param selectedAlgorithm: The algorithm ID of the selected (default) algorithm that will be used in deployments of this Model Version
   :type selectedAlgorithm: str
   :param selectedAlgorithmName: The algorithm name of the selected (default) algorithm that will be used in deployments of this Model Version
   :type selectedAlgorithmName: str
   :param modelId: The Model ID
   :type modelId: str
   :param modelVersion: The Model Version
   :type modelVersion: str
   :param metricNames: Maps shorthand names of the metrics to their verbose names
   :type metricNames: dict
   :param targetColumn: The target feature that the model was trained to predict
   :type targetColumn: str
   :param trainValTestSplit: Info on train, val and test split
   :type trainValTestSplit: dict
   :param trainingCompletedAt: Timestamp when training was completed
   :type trainingCompletedAt: datetime


   .. py:attribute:: algo_metrics
      :value: None



   .. py:attribute:: selected_algorithm
      :value: None



   .. py:attribute:: selected_algorithm_name
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: metric_names
      :value: None



   .. py:attribute:: target_column
      :value: None



   .. py:attribute:: train_val_test_split
      :value: None



   .. py:attribute:: training_completed_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
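
   An illustrative sketch, assuming ``metrics`` is a :py:class:`ModelMetrics`
   instance obtained from the API client:

   .. code-block:: python

      # algo_metrics maps algorithm IDs to names and metric dictionaries;
      # look up the entry for the selected (default) algorithm.
      print('Selected algorithm:', metrics.selected_algorithm_name)
      print('Target column:', metrics.target_column)
      print(metrics.algo_metrics.get(metrics.selected_algorithm))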



.. py:class:: ModelMonitor(client, modelMonitorId=None, name=None, createdAt=None, projectId=None, trainingFeatureGroupId=None, predictionFeatureGroupId=None, predictionFeatureGroupVersion=None, trainingFeatureGroupVersion=None, alertConfig=None, biasMetricId=None, metricConfigs=None, featureGroupMonitorConfigs=None, metricTypes=None, modelId=None, starred=None, batchPredictionId=None, monitorType=None, edaConfigs=None, trainingForecastConfig=None, predictionForecastConfig=None, forecastFrequency=None, trainingFeatureGroupSampling=None, predictionFeatureGroupSampling=None, monitorDriftConfig=None, predictionDataUseMappings=None, trainingDataUseMappings=None, refreshSchedules={}, latestMonitorModelVersion={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A model monitor

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modelMonitorId: The unique identifier of the model monitor.
   :type modelMonitorId: str
   :param name: The user-friendly name for the model monitor.
   :type name: str
   :param createdAt: Date and time at which the model monitor was created.
   :type createdAt: str
   :param projectId: The project this model monitor belongs to.
   :type projectId: str
   :param trainingFeatureGroupId: Feature group IDs that this model monitor is monitoring.
   :type trainingFeatureGroupId: list[str]
   :param predictionFeatureGroupId: Feature group IDs that this model monitor is monitoring.
   :type predictionFeatureGroupId: list[str]
   :param predictionFeatureGroupVersion: Feature group versions that this model monitor is monitoring.
   :type predictionFeatureGroupVersion: list[str]
   :param trainingFeatureGroupVersion: Feature group versions that this model monitor is monitoring.
   :type trainingFeatureGroupVersion: list[str]
   :param alertConfig: Alerting configuration for this model monitor.
   :type alertConfig: dict
   :param biasMetricId: The bias metric ID
   :type biasMetricId: str
   :param metricConfigs: Configurations for model monitor
   :type metricConfigs: dict
   :param featureGroupMonitorConfigs: Configurations for feature group monitor
   :type featureGroupMonitorConfigs: dict
   :param metricTypes: List of metric types
   :type metricTypes: dict
   :param modelId: Model ID that this model monitor is monitoring.
   :type modelId: str
   :param starred: Whether this model monitor is starred.
   :type starred: bool
   :param batchPredictionId: The batch prediction ID this model monitor monitors
   :type batchPredictionId: str
   :param monitorType: The type of the monitor, one of MODEL_MONITOR or FEATURE_GROUP_MONITOR
   :type monitorType: str
   :param edaConfigs: The configs for EDA
   :type edaConfigs: dict
   :param trainingForecastConfig: The training config for forecast monitors
   :type trainingForecastConfig: dict
   :param predictionForecastConfig: The prediction config for forecast monitors
   :type predictionForecastConfig: dict
   :param forecastFrequency: The frequency of the forecast
   :type forecastFrequency: str
   :param trainingFeatureGroupSampling: Whether or not we sample from training feature group
   :type trainingFeatureGroupSampling: bool
   :param predictionFeatureGroupSampling: Whether or not we sample from prediction feature group
   :type predictionFeatureGroupSampling: bool
   :param monitorDriftConfig: The monitor drift config for the monitor
   :type monitorDriftConfig: dict
   :param predictionDataUseMappings: The data_use mapping of the prediction features
   :type predictionDataUseMappings: dict
   :param trainingDataUseMappings: The data_use mapping of the training features
   :type trainingDataUseMappings: dict
   :param latestMonitorModelVersion: The latest model monitor version.
   :type latestMonitorModelVersion: ModelMonitorVersion
   :param refreshSchedules: List of refresh schedules that indicate when the next model version will be trained.
   :type refreshSchedules: RefreshSchedule


   .. py:attribute:: model_monitor_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: training_feature_group_id
      :value: None



   .. py:attribute:: prediction_feature_group_id
      :value: None



   .. py:attribute:: prediction_feature_group_version
      :value: None



   .. py:attribute:: training_feature_group_version
      :value: None



   .. py:attribute:: alert_config
      :value: None



   .. py:attribute:: bias_metric_id
      :value: None



   .. py:attribute:: metric_configs
      :value: None



   .. py:attribute:: feature_group_monitor_configs
      :value: None



   .. py:attribute:: metric_types
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: starred
      :value: None



   .. py:attribute:: batch_prediction_id
      :value: None



   .. py:attribute:: monitor_type
      :value: None



   .. py:attribute:: eda_configs
      :value: None



   .. py:attribute:: training_forecast_config
      :value: None



   .. py:attribute:: prediction_forecast_config
      :value: None



   .. py:attribute:: forecast_frequency
      :value: None



   .. py:attribute:: training_feature_group_sampling
      :value: None



   .. py:attribute:: prediction_feature_group_sampling
      :value: None



   .. py:attribute:: monitor_drift_config
      :value: None



   .. py:attribute:: prediction_data_use_mappings
      :value: None



   .. py:attribute:: training_data_use_mappings
      :value: None



   .. py:attribute:: refresh_schedules


   .. py:attribute:: latest_monitor_model_version


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: rerun()

      Re-runs the specified model monitor.

      :param model_monitor_id: Unique string identifier of the model monitor to re-run.
      :type model_monitor_id: str

      :returns: The model monitor that is being re-run.
      :rtype: ModelMonitor



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: ModelMonitor



   .. py:method:: describe()

      Retrieves a full description of the specified model monitor.

      :param model_monitor_id: Unique string identifier associated with the model monitor.
      :type model_monitor_id: str

      :returns: Description of the model monitor.
      :rtype: ModelMonitor



   .. py:method:: get_summary()

      Gets the summary of a model monitor across versions.

      :param model_monitor_id: A unique string identifier associated with the model monitor.
      :type model_monitor_id: str

      :returns: An object describing integrity, bias violations, model accuracy and drift for the model monitor.
      :rtype: ModelMonitorSummary



   .. py:method:: list_versions(limit = 100, start_after_version = None)

      Retrieves a list of versions for a given model monitor.

      :param limit: The maximum length of the list of all model monitor versions.
      :type limit: int
      :param start_after_version: The ID of the version after which the list starts.
      :type start_after_version: str

      :returns: A list of model monitor versions.
      :rtype: list[ModelMonitorVersion]



   .. py:method:: rename(name)

      Renames a model monitor

      :param name: The new name to apply to the model monitor.
      :type name: str



   .. py:method:: delete()

      Deletes the specified Model Monitor and all its versions.

      :param model_monitor_id: Unique identifier of the Model Monitor to delete.
      :type model_monitor_id: str



   .. py:method:: list_monitor_alerts_for_monitor(realtime_monitor_id = None)

      Retrieves the list of monitor alerts for a specified monitor. One of the model_monitor_id or realtime_monitor_id is required but not both.

      :param realtime_monitor_id: The unique ID associated with the real-time monitor.
      :type realtime_monitor_id: str

      :returns: A list of monitor alerts.
      :rtype: list[MonitorAlert]
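
   A minimal workflow sketch using only the methods documented above, assuming
   ``monitor`` is a :py:class:`ModelMonitor` instance:

   .. code-block:: python

      # Re-run the monitor, then list its most recent versions and fetch a
      # cross-version summary of integrity, bias, accuracy and drift.
      monitor.rerun()
      for version in monitor.list_versions(limit=5):
          print(version.model_monitor_version, version.status)
      summary = monitor.get_summary()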



.. py:class:: ModelMonitorOrgSummary(client, summary=None, featureDrift=None, labelDrift=None, dataIntegrity=None, performance=None, alerts=None, monitorData=None, totalStarredMonitors=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A summary of an organization's model monitors

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param summary: Count of monitors, count of versions, count of total rows of prediction data, count of failed versions.
   :type summary: dict
   :param featureDrift: Percentage of monitors with and without KL divergence > 2.
   :type featureDrift: dict
   :param labelDrift: Histogram of label drift across versions.
   :type labelDrift: dict
   :param dataIntegrity: Counts of violations.
   :type dataIntegrity: dict
   :param performance: Model accuracy information.
   :type performance: dict
   :param alerts: Count of alerts that are raised.
   :type alerts: dict
   :param monitorData: Information about monitors used in the summary for each time period.
   :type monitorData: dict
   :param totalStarredMonitors: Total number of starred monitors.
   :type totalStarredMonitors: int


   .. py:attribute:: summary
      :value: None



   .. py:attribute:: feature_drift
      :value: None



   .. py:attribute:: label_drift
      :value: None



   .. py:attribute:: data_integrity
      :value: None



   .. py:attribute:: performance
      :value: None



   .. py:attribute:: alerts
      :value: None



   .. py:attribute:: monitor_data
      :value: None



   .. py:attribute:: total_starred_monitors
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelMonitorSummary(client, modelAccuracy=None, modelDrift=None, dataIntegrity=None, biasViolations=None, alerts=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A summary of model monitor

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modelAccuracy: A list of model accuracy objects including accuracy and monitor version information.
   :type modelAccuracy: list
   :param modelDrift: A list of model drift objects including label and prediction drifts and monitor version information.
   :type modelDrift: list
   :param dataIntegrity: A list of data integrity objects including counts of violations and monitor version information.
   :type dataIntegrity: list
   :param biasViolations: A list of bias objects including bias counts and monitor version information.
   :type biasViolations: list
   :param alerts: A list of alerts by type for each model monitor instance
   :type alerts: list


   .. py:attribute:: model_accuracy
      :value: None



   .. py:attribute:: model_drift
      :value: None



   .. py:attribute:: data_integrity
      :value: None



   .. py:attribute:: bias_violations
      :value: None



   .. py:attribute:: alerts
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelMonitorSummaryFromOrg(client, data=None, infos=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A summary of model monitor given an organization

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param data: A list of either model accuracy, drift, data integrity, or bias chart objects and their monitor version information.
   :type data: list
   :param infos: A dictionary of model monitor information.
   :type infos: dict


   .. py:attribute:: data
      :value: None



   .. py:attribute:: infos
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelMonitorVersion(client, modelMonitorVersion=None, status=None, modelMonitorId=None, monitoringStartedAt=None, monitoringCompletedAt=None, trainingFeatureGroupVersion=None, predictionFeatureGroupVersion=None, error=None, pendingDeploymentIds=None, failedDeploymentIds=None, metricConfigs=None, featureGroupMonitorConfigs=None, metricTypes=None, modelVersion=None, batchPredictionVersion=None, edaConfigs=None, trainingForecastConfig=None, predictionForecastConfig=None, forecastFrequency=None, monitorDriftConfig=None, predictionDataUseMappings=None, trainingDataUseMappings=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of a model monitor

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modelMonitorVersion: The unique identifier of a model monitor version.
   :type modelMonitorVersion: str
   :param status: The current status of the model monitor version.
   :type status: str
   :param modelMonitorId: A reference to the model monitor this version belongs to.
   :type modelMonitorId: str
   :param monitoringStartedAt: The start time and date of the monitoring process.
   :type monitoringStartedAt: str
   :param monitoringCompletedAt: The end time and date of the monitoring process.
   :type monitoringCompletedAt: str
   :param trainingFeatureGroupVersion: Feature group version IDs that this refresh pipeline run is monitoring.
   :type trainingFeatureGroupVersion: list[str]
   :param predictionFeatureGroupVersion: Feature group version IDs that this refresh pipeline run is monitoring.
   :type predictionFeatureGroupVersion: list[str]
   :param error: Relevant error if the status is FAILED.
   :type error: str
   :param pendingDeploymentIds: List of deployment IDs where deployment is pending.
   :type pendingDeploymentIds: list
   :param failedDeploymentIds: List of failed deployment IDs.
   :type failedDeploymentIds: list
   :param metricConfigs: List of metric configs for the model monitor instance.
   :type metricConfigs: list[dict]
   :param featureGroupMonitorConfigs: Configurations for feature group monitor
   :type featureGroupMonitorConfigs: dict
   :param metricTypes: List of metric types.
   :type metricTypes: list
   :param modelVersion: Model version IDs that this refresh pipeline run is monitoring.
   :type modelVersion: list[str]
   :param batchPredictionVersion: The batch prediction version this model monitor is monitoring
   :type batchPredictionVersion: str
   :param edaConfigs: The list of eda configs for the version
   :type edaConfigs: list
   :param trainingForecastConfig: The training forecast config for the monitor version
   :type trainingForecastConfig: dict
   :param predictionForecastConfig: The prediction forecast config for the monitor version
   :type predictionForecastConfig: dict
   :param forecastFrequency: The forecast frequency for the monitor version
   :type forecastFrequency: str
   :param monitorDriftConfig: The monitor drift config for the monitor version
   :type monitorDriftConfig: dict
   :param predictionDataUseMappings: The mapping of prediction data use to feature group version
   :type predictionDataUseMappings: dict
   :param trainingDataUseMappings: The mapping of training data use to feature group version
   :type trainingDataUseMappings: dict


   .. py:attribute:: model_monitor_version
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: model_monitor_id
      :value: None



   .. py:attribute:: monitoring_started_at
      :value: None



   .. py:attribute:: monitoring_completed_at
      :value: None



   .. py:attribute:: training_feature_group_version
      :value: None



   .. py:attribute:: prediction_feature_group_version
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: pending_deployment_ids
      :value: None



   .. py:attribute:: failed_deployment_ids
      :value: None



   .. py:attribute:: metric_configs
      :value: None



   .. py:attribute:: feature_group_monitor_configs
      :value: None



   .. py:attribute:: metric_types
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: batch_prediction_version
      :value: None



   .. py:attribute:: eda_configs
      :value: None



   .. py:attribute:: training_forecast_config
      :value: None



   .. py:attribute:: prediction_forecast_config
      :value: None



   .. py:attribute:: forecast_frequency
      :value: None



   .. py:attribute:: monitor_drift_config
      :value: None



   .. py:attribute:: prediction_data_use_mappings
      :value: None



   .. py:attribute:: training_data_use_mappings
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: get_prediction_drift()

      Gets the label and prediction drifts for a model monitor.

      :param model_monitor_version: Unique string identifier for a model monitor version created under the project.
      :type model_monitor_version: str

      :returns: Object describing training and prediction output label and prediction distributions.
      :rtype: DriftDistributions



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: ModelMonitorVersion



   .. py:method:: describe()

      Retrieves a full description of the specified model monitor version.

      :param model_monitor_version: The unique version ID of the model monitor version.
      :type model_monitor_version: str

      :returns: A model monitor version.
      :rtype: ModelMonitorVersion



   .. py:method:: delete()

      Deletes the specified model monitor version.

      :param model_monitor_version: Unique identifier of the model monitor version to delete.
      :type model_monitor_version: str



   .. py:method:: metric_data(metric_type, actual_values_to_detail = None)

      Provides the data needed for decile metrics associated with the model monitor.

      :param metric_type: The type of metric to get data for.
      :type metric_type: str
      :param actual_values_to_detail: The actual values to detail.
      :type actual_values_to_detail: list

      :returns: Data associated with the metric.
      :rtype: ModelMonitorVersionMetricData



   .. py:method:: list_monitor_alert_versions_for_monitor_version()

      Retrieves the list of monitor alert versions for a specified monitor instance.

      :param model_monitor_version: The unique ID associated with the model monitor.
      :type model_monitor_version: str

      :returns: A list of monitor alert versions.
      :rtype: list[MonitorAlertVersion]



   .. py:method:: get_drift_for_feature(feature_name, nested_feature_name = None)

      Gets the feature drift associated with a single feature in an output feature group from a prediction.

      :param feature_name: Name of the feature to view the distribution of.
      :type feature_name: str
      :param nested_feature_name: Optionally, the name of the nested feature that the feature is in.
      :type nested_feature_name: str

      :returns: An object describing the training and prediction output feature distributions.
      :rtype: FeatureDistribution



   .. py:method:: get_outliers_for_feature(feature_name = None, nested_feature_name = None)

      Gets a list of outliers measured by a single feature (or overall) in an output feature group from a prediction.

      :param feature_name: Name of the feature to view the distribution of.
      :type feature_name: str
      :param nested_feature_name: Optionally, the name of the nested feature that the feature is in.
      :type nested_feature_name: str



   .. py:method:: wait_for_monitor(timeout=1200)

      A waiting call until model monitor version is ready.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the model monitor version.

      :returns: A string describing the status of the model monitor version (e.g., pending, complete, etc.).
      :rtype: str
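
   A hedged usage sketch, assuming ``version`` is a :py:class:`ModelMonitorVersion`
   instance; the feature name below is purely illustrative:

   .. code-block:: python

      # Wait for the monitoring run to finish, then pull drift information.
      version.wait_for_monitor()
      print(version.get_status())
      drift = version.get_prediction_drift()
      feature_drift = version.get_drift_for_feature('example_feature')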



.. py:class:: ModelMonitorVersionMetricData(client, name=None, algoName=None, featureGroupVersion=None, modelMonitor=None, modelMonitorVersion=None, metricInfos=None, metricNames=None, metrics=None, metricCharts=None, otherMetrics=None, actualValuesSupportedForDrilldown=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Data for displaying model monitor version metric data

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the metric type
   :type name: str
   :param algoName: The name of the algo used for the prediction metric
   :type algoName: str
   :param featureGroupVersion: The prediction feature group used for analysis
   :type featureGroupVersion: str
   :param modelMonitor: The id of the model monitor
   :type modelMonitor: str
   :param modelMonitorVersion: The id of the model monitor version
   :type modelMonitorVersion: str
   :param metricInfos: Name and description for metrics
   :type metricInfos: dict
   :param metricNames: Internal name to external name mapping
   :type metricNames: dict
   :param metrics: Metric name to metric data
   :type metrics: dict
   :param metricCharts: List of different metric charts
   :type metricCharts: list
   :param otherMetrics: List of other metrics to optionally plot
   :type otherMetrics: list
   :param actualValuesSupportedForDrilldown: List of values supported for drilldown
   :type actualValuesSupportedForDrilldown: list


   .. py:attribute:: name
      :value: None



   .. py:attribute:: algo_name
      :value: None



   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: model_monitor
      :value: None



   .. py:attribute:: model_monitor_version
      :value: None



   .. py:attribute:: metric_infos
      :value: None



   .. py:attribute:: metric_names
      :value: None



   .. py:attribute:: metrics
      :value: None



   .. py:attribute:: metric_charts
      :value: None



   .. py:attribute:: other_metrics
      :value: None



   .. py:attribute:: actual_values_supported_for_drilldown
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelTrainingTypeForDeployment(client, label=None, value=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Model training types for deployment.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param label: Labels to show to users in deployment UI
   :type label: str
   :param value: Value to use on backend for deployment API call
   :type value: str


   .. py:attribute:: label
      :value: None



   .. py:attribute:: value
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelUpload(client, modelId=None, modelVersion=None, status=None, createdAt=None, modelUploadId=None, embeddingsUploadId=None, artifactsUploadId=None, verificationsUploadId=None, defaultItemsUploadId=None, modelFileUploadId=None, modelStateUploadId=None, inputPreprocessorUploadId=None, requirementsUploadId=None, resourcesUploadId=None, multiCatalogEmbeddingsUploadId=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A model version that includes the upload identifiers for the various required files.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modelId: A reference to the model this version belongs to.
   :type modelId: str
   :param modelVersion: A unique identifier for the model version.
   :type modelVersion: str
   :param status: The current status of the model.
   :type status: str
   :param createdAt: The timestamp at which the model version was created, in ISO-8601 format.
   :type createdAt: str
   :param modelUploadId: An upload identifier to be used when uploading the TensorFlow Saved Model.
   :type modelUploadId: str
   :param embeddingsUploadId: An upload identifier to be used when uploading the embeddings CSV.
   :type embeddingsUploadId: str
   :param artifactsUploadId: An upload identifier to be used when uploading the artifacts JSON file.
   :type artifactsUploadId: str
   :param verificationsUploadId: An upload identifier to be used when uploading the verifications JSON file.
   :type verificationsUploadId: str
   :param defaultItemsUploadId: An upload identifier to be used when uploading the default items JSON file.
   :type defaultItemsUploadId: str
   :param modelFileUploadId: An upload identifier to be used when uploading the model JSON file.
   :type modelFileUploadId: str
   :param modelStateUploadId: An upload identifier to be used when uploading the model state JSON file.
   :type modelStateUploadId: str
   :param inputPreprocessorUploadId: An upload identifier to be used when uploading the input preprocessor JSON file.
   :type inputPreprocessorUploadId: str
   :param requirementsUploadId: An upload identifier to be used when uploading the requirements JSON file.
   :type requirementsUploadId: str
   :param resourcesUploadId: An upload identifier to be used when uploading the resources JSON file.
   :type resourcesUploadId: str
   :param multiCatalogEmbeddingsUploadId: An upload identifier to be used when uploading the multi-catalog embeddings CSV file.
   :type multiCatalogEmbeddingsUploadId: str


   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: model_upload_id
      :value: None



   .. py:attribute:: embeddings_upload_id
      :value: None



   .. py:attribute:: artifacts_upload_id
      :value: None



   .. py:attribute:: verifications_upload_id
      :value: None



   .. py:attribute:: default_items_upload_id
      :value: None



   .. py:attribute:: model_file_upload_id
      :value: None



   .. py:attribute:: model_state_upload_id
      :value: None



   .. py:attribute:: input_preprocessor_upload_id
      :value: None



   .. py:attribute:: requirements_upload_id
      :value: None



   .. py:attribute:: resources_upload_id
      :value: None



   .. py:attribute:: multi_catalog_embeddings_upload_id
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModelVersion(client, modelVersion=None, modelConfigType=None, status=None, modelId=None, modelPredictionConfig=None, trainingStartedAt=None, trainingCompletedAt=None, featureGroupVersions=None, customAlgorithms=None, builtinAlgorithms=None, error=None, pendingDeploymentIds=None, failedDeploymentIds=None, cpuSize=None, memory=None, automlComplete=None, trainingFeatureGroupIds=None, trainingDocumentRetrieverVersions=None, documentRetrieverMappings=None, bestAlgorithm=None, defaultAlgorithm=None, featureAnalysisStatus=None, dataClusterInfo=None, customAlgorithmConfigs=None, trainedModelTypes=None, useGpu=None, partialComplete=None, modelFeatureGroupSchemaMappings=None, trainingConfigUpdated=None, codeSource={}, modelConfig={}, deployableAlgorithms={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of a model

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modelVersion: The unique identifier of a model version.
   :type modelVersion: str
   :param modelConfigType: Name of the TrainingConfig class of the model_config.
   :type modelConfigType: str
   :param status: The current status of the model.
   :type status: str
   :param modelId: A reference to the model this version belongs to.
   :type modelId: str
   :param modelPredictionConfig: The prediction config options for the model.
   :type modelPredictionConfig: dict
   :param trainingStartedAt: The start time and date of the training process in ISO-8601 format.
   :type trainingStartedAt: str
   :param trainingCompletedAt: The end time and date of the training process in ISO-8601 format.
   :type trainingCompletedAt: str
   :param featureGroupVersions: A list of Feature Group version IDs used for model training.
   :type featureGroupVersions: list
   :param customAlgorithms: List of user-defined algorithms used for model training.
   :type customAlgorithms: list
   :param builtinAlgorithms: List of names of builtin algorithms provided by Abacus.AI used for model training.
   :type builtinAlgorithms: list
   :param error: Relevant error if the status is FAILED.
   :type error: str
   :param pendingDeploymentIds: List of deployment IDs where deployment is pending.
   :type pendingDeploymentIds: list
   :param failedDeploymentIds: List of failed deployment IDs.
   :type failedDeploymentIds: list
   :param cpuSize: CPU size specified for the python model training.
   :type cpuSize: str
   :param memory: Memory in GB specified for the python model training.
   :type memory: int
   :param automlComplete: If true, all algorithms have completed training.
   :type automlComplete: bool
   :param trainingFeatureGroupIds: The unique identifiers of the feature groups used as inputs during training to create this ModelVersion.
   :type trainingFeatureGroupIds: list
   :param trainingDocumentRetrieverVersions: The document retriever version IDs used as inputs during training to create this ModelVersion.
   :type trainingDocumentRetrieverVersions: list
   :param documentRetrieverMappings: Mapping of document retriever versions to their respective information.
   :type documentRetrieverMappings: dict
   :param bestAlgorithm: Best performing algorithm.
   :type bestAlgorithm: dict
   :param defaultAlgorithm: Default algorithm that the user has selected.
   :type defaultAlgorithm: dict
   :param featureAnalysisStatus: Lifecycle of the feature analysis stage.
   :type featureAnalysisStatus: str
   :param dataClusterInfo: Information about the models for different data clusters.
   :type dataClusterInfo: dict
   :param customAlgorithmConfigs: User-defined configs for each of the user-defined custom algorithms.
   :type customAlgorithmConfigs: dict
   :param trainedModelTypes: List of trained model types.
   :type trainedModelTypes: list
   :param useGpu: Whether this model version is using gpu
   :type useGpu: bool
   :param partialComplete: If true, all required algorithms have completed training.
   :type partialComplete: bool
   :param modelFeatureGroupSchemaMappings: Mapping of feature group to schema version.
   :type modelFeatureGroupSchemaMappings: dict
   :param trainingConfigUpdated: If the training config has been updated since the instance was created.
   :type trainingConfigUpdated: bool
   :param codeSource: If a python model, information on where the source code is located.
   :type codeSource: CodeSource
   :param modelConfig: The training config options used to train this model.
   :type modelConfig: TrainingConfig
   :param deployableAlgorithms: List of deployable algorithms.
   :type deployableAlgorithms: DeployableAlgorithm


   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: model_config_type
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_prediction_config
      :value: None



   .. py:attribute:: training_started_at
      :value: None



   .. py:attribute:: training_completed_at
      :value: None



   .. py:attribute:: feature_group_versions
      :value: None



   .. py:attribute:: custom_algorithms
      :value: None



   .. py:attribute:: builtin_algorithms
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: pending_deployment_ids
      :value: None



   .. py:attribute:: failed_deployment_ids
      :value: None



   .. py:attribute:: cpu_size
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: automl_complete
      :value: None



   .. py:attribute:: training_feature_group_ids
      :value: None



   .. py:attribute:: training_document_retriever_versions
      :value: None



   .. py:attribute:: document_retriever_mappings
      :value: None



   .. py:attribute:: best_algorithm
      :value: None



   .. py:attribute:: default_algorithm
      :value: None



   .. py:attribute:: feature_analysis_status
      :value: None



   .. py:attribute:: data_cluster_info
      :value: None



   .. py:attribute:: custom_algorithm_configs
      :value: None



   .. py:attribute:: trained_model_types
      :value: None



   .. py:attribute:: use_gpu
      :value: None



   .. py:attribute:: partial_complete
      :value: None



   .. py:attribute:: model_feature_group_schema_mappings
      :value: None



   .. py:attribute:: training_config_updated
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: model_config


   .. py:attribute:: deployable_algorithms


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: describe_train_test_data_split_feature_group_version()

      Get the train and test data split for a trained model by model version. This is only supported for models with custom algorithms.

      :param model_version: The unique version ID of the model version.
      :type model_version: str

      :returns: The feature group version containing the training data and folds information.
      :rtype: FeatureGroupVersion



   .. py:method:: set_model_objective(metric = None)

      Sets the best model for all model instances of the model based on the specified metric, and updates the training configuration to use the specified metric for any future model versions.

      If metric is set to None, the default selection is used.


      :param metric: The metric to use to determine the best model.
      :type metric: str



   .. py:method:: get_feature_group_schemas_for()

      Gets the schema (including feature mappings) for all feature groups used in the model version.

      :param model_version: Unique string identifier for the version of the model.
      :type model_version: str

      :returns: List of schema for all feature groups used in the model version.
      :rtype: list[ModelVersionFeatureGroupSchema]



   .. py:method:: delete()

      Deletes the specified model version. Model versions which are currently used in deployments cannot be deleted.

      :param model_version: The unique identifier of the model version to delete.
      :type model_version: str



   .. py:method:: export_model_artifact_as_feature_group(table_name, artifact_type = None)

      Exports metric artifact data for a model as a feature group.

      :param table_name: Name of the feature group table to create.
      :type table_name: str
      :param artifact_type: eval artifact type to export.
      :type artifact_type: EvalArtifactType

      :returns: The created feature group.
      :rtype: FeatureGroup



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: ModelVersion



   .. py:method:: describe()

      Retrieves a full description of the specified model version.

      :param model_version: Unique string identifier of the model version.
      :type model_version: str

      :returns: A model version.
      :rtype: ModelVersion



   .. py:method:: get_feature_importance_by()

      Gets the feature importance calculated by various methods for the model.

      :param model_version: Unique string identifier for the model version.
      :type model_version: str

      :returns: Feature importances for the model.
      :rtype: FeatureImportance



   .. py:method:: get_training_data_logs()

      Retrieves the data preparation logs during model training.

      :param model_version: The unique version ID of the model version.
      :type model_version: str

      :returns: A list of logs.
      :rtype: list[DataPrepLogs]



   .. py:method:: get_training_logs(stdout = False, stderr = False)

      Returns training logs for the model.

      :param stdout: Set True to get info logs.
      :type stdout: bool
      :param stderr: Set True to get error logs.
      :type stderr: bool

      :returns: A function logs object.
      :rtype: FunctionLogs



   .. py:method:: export_custom(output_location, algorithm = None)

      Bundle custom model artifacts to a zip file, and export to the specified location.

      :param output_location: Location to export the model artifacts results. For example, s3://a-bucket/
      :type output_location: str
      :param algorithm: The algorithm to be exported. Optional if there's only one custom algorithm in the model version.
      :type algorithm: str

      :returns: Object describing the export and its status.
      :rtype: ModelArtifactsExport



   .. py:method:: wait_for_training(timeout=None)

      A waiting call until model gets trained.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int



   .. py:method:: wait_for_full_automl(timeout=None)

      A waiting call until full AutoML cycle is completed.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the model version under training.

      :returns: A string describing the status of a model training (pending, complete, etc.).
      :rtype: str



   .. py:method:: get_train_test_feature_group_as_pandas()

      Get the model train test data split feature group of the model version as pandas data frame.

      :returns: A pandas dataframe for the training data with fold column.
      :rtype: pandas.DataFrame
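
   A minimal end-to-end sketch, assuming ``model_version`` is a
   :py:class:`ModelVersion` instance for a model with custom algorithms; the
   output location is a placeholder:

   .. code-block:: python

      # Wait for training to finish, then export the custom model artifacts
      # and wait for the export itself (see ModelArtifactsExport above).
      model_version.wait_for_training()
      print(model_version.get_status())

      export = model_version.export_custom('s3://example-bucket/exports/')
      export.wait_for_results()

      # Optionally inspect feature importances and stdout training logs.
      importance = model_version.get_feature_importance_by()
      logs = model_version.get_training_logs(stdout=True)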



.. py:class:: ModelVersionFeatureGroupSchema(client, featureGroupId=None, featureGroupName=None, schema={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Schema for a feature group used in model version

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: The ID of the feature group.
   :type featureGroupId: str
   :param featureGroupName: The name of the feature group.
   :type featureGroupName: str
   :param schema: List of feature schemas of a feature group.
   :type schema: Schema


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: feature_group_name
      :value: None



   .. py:attribute:: schema


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ModificationLockInfo(client, modificationLock=None, userEmails=None, organizationGroups=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Information about a modification lock for a certain object

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param modificationLock: Whether or not the object has its modification lock activated.
   :type modificationLock: bool
   :param userEmails: The list of user emails allowed to modify the object if the object's modification lock is activated.
   :type userEmails: list of strings
   :param organizationGroups: The list of organization groups allowed to modify the object if the object's modification lock is activated.
   :type organizationGroups: list of unique string identifiers


   .. py:attribute:: modification_lock
      :value: None



   .. py:attribute:: user_emails
      :value: None



   .. py:attribute:: organization_groups
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Module(client, name=None, createdAt=None, notebookId=None, hideModuleCode=None, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Customer created python module

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name to identify the module. Only uppercase letters, numbers, and underscores are allowed.
   :type name: str
   :param createdAt: The date and time when the Python function was created, in ISO-8601 format.
   :type createdAt: str
   :param notebookId: The unique string identifier of the notebook used to create or edit the module.
   :type notebookId: str
   :param hideModuleCode: Whether the module code is hidden from external users
   :type hideModuleCode: bool
   :param codeSource: Information about the source code of the Python function.
   :type codeSource: CodeSource


   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: hide_module_code
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: MonitorAlert(client, name=None, monitorAlertId=None, createdAt=None, projectId=None, modelMonitorId=None, realtimeMonitorId=None, conditionConfig=None, actionConfig=None, conditionDescription=None, actionDescription=None, alertType=None, deploymentId=None, latestMonitorAlertVersion={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Monitor Alert

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The user-friendly name for the alert.
   :type name: str
   :param monitorAlertId: The unique identifier of the monitor alert.
   :type monitorAlertId: str
   :param createdAt: Date and time at which the monitor alert was created.
   :type createdAt: str
   :param projectId: The project this alert belongs to.
   :type projectId: str
   :param modelMonitorId: The monitor id that this alert is associated with
   :type modelMonitorId: str
   :param realtimeMonitorId: The realtime monitor id that this alert is associated with
   :type realtimeMonitorId: str
   :param conditionConfig: The condition configuration for this alert.
   :type conditionConfig: dict
   :param actionConfig: The action configuration for this alert.
   :type actionConfig: dict
   :param conditionDescription: User friendly description of the condition
   :type conditionDescription: str
   :param actionDescription: User friendly description of the action
   :type actionDescription: str
   :param alertType: The type of the alert
   :type alertType: str
   :param deploymentId: The deployment ID this alert is associated with
   :type deploymentId: str
   :param latestMonitorAlertVersion: The latest monitor alert version.
   :type latestMonitorAlertVersion: MonitorAlertVersion


   .. py:attribute:: name
      :value: None



   .. py:attribute:: monitor_alert_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: model_monitor_id
      :value: None



   .. py:attribute:: realtime_monitor_id
      :value: None



   .. py:attribute:: condition_config
      :value: None



   .. py:attribute:: action_config
      :value: None



   .. py:attribute:: condition_description
      :value: None



   .. py:attribute:: action_description
      :value: None



   .. py:attribute:: alert_type
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: latest_monitor_alert_version


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: update(alert_name = None, condition_config = None, action_config = None)

      Update monitor alert

      :param alert_name: Name of the alert.
      :type alert_name: str
      :param condition_config: Condition to run the actions for the alert.
      :type condition_config: AlertConditionConfig
      :param action_config: Configuration for the action of the alert.
      :type action_config: AlertActionConfig

      :returns: Object describing the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: MonitorAlert



   .. py:method:: describe()

      Describes a given monitor alert id

      :param monitor_alert_id: Unique identifier of the monitor alert.
      :type monitor_alert_id: str

      :returns: Object containing information about the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: run()

      Reruns a given monitor alert from latest monitor instance

      :param monitor_alert_id: Unique identifier of a monitor alert.
      :type monitor_alert_id: str

      :returns: Object describing the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: delete()

      Deletes a monitor alert

      :param monitor_alert_id: The unique string identifier of the alert to delete.
      :type monitor_alert_id: str
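
   A hedged sketch, assuming ``alert`` is a :py:class:`MonitorAlert` instance:

   .. code-block:: python

      # Re-run the alert against the latest monitor instance, then refresh
      # this object to pick up the newest alert version.
      alert.run()
      alert.refresh()
      print(alert.condition_description, '->', alert.action_description)
      print(alert.latest_monitor_alert_version)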



.. py:class:: MonitorAlertVersion(client, name=None, monitorAlertVersion=None, monitorAlertId=None, status=None, createdAt=None, alertingStartedAt=None, alertingCompletedAt=None, error=None, modelMonitorVersion=None, conditionConfig=None, actionConfig=None, alertResult=None, actionStatus=None, actionError=None, actionStartedAt=None, actionCompletedAt=None, conditionDescription=None, actionDescription=None, alertType=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A monitor alert version

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The user-friendly name for the monitor alert.
   :type name: str
   :param monitorAlertVersion: The identifier for the alert version.
   :type monitorAlertVersion: str
   :param monitorAlertId: The identifier for the alert.
   :type monitorAlertId: str
   :param status: The current status of the monitor alert.
   :type status: str
   :param createdAt: Date and time at which the monitor alert was created.
   :type createdAt: str
   :param alertingStartedAt: The start time and date of the monitor alerting process.
   :type alertingStartedAt: str
   :param alertingCompletedAt: The end time and date of the monitor alerting process.
   :type alertingCompletedAt: str
   :param error: Relevant error if the status is FAILED.
   :type error: str
   :param modelMonitorVersion: The model monitor version associated with the monitor alert version.
   :type modelMonitorVersion: str
   :param conditionConfig: The condition configuration for this alert.
   :type conditionConfig: dict
   :param actionConfig: The action configuration for this alert.
   :type actionConfig: dict
   :param alertResult: The current result of the alert
   :type alertResult: str
   :param actionStatus: The current status of the action as a result of the monitor alert.
   :type actionStatus: str
   :param actionError: Relevant error if the action status is FAILED.
   :type actionError: str
   :param actionStartedAt: The start time and date of the action for the alerting process.
   :type actionStartedAt: str
   :param actionCompletedAt: The end time and date of the action for the alerting process.
   :type actionCompletedAt: str
   :param conditionDescription: User friendly description of the condition
   :type conditionDescription: str
   :param actionDescription: User friendly description of the action
   :type actionDescription: str
   :param alertType: The type of the alert
   :type alertType: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: monitor_alert_version
      :value: None



   .. py:attribute:: monitor_alert_id
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: alerting_started_at
      :value: None



   .. py:attribute:: alerting_completed_at
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: model_monitor_version
      :value: None



   .. py:attribute:: condition_config
      :value: None



   .. py:attribute:: action_config
      :value: None



   .. py:attribute:: alert_result
      :value: None



   .. py:attribute:: action_status
      :value: None



   .. py:attribute:: action_error
      :value: None



   .. py:attribute:: action_started_at
      :value: None



   .. py:attribute:: action_completed_at
      :value: None



   .. py:attribute:: condition_description
      :value: None



   .. py:attribute:: action_description
      :value: None



   .. py:attribute:: alert_type
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: MonitorAlertVersion



   .. py:method:: describe()

      Describes a given monitor alert version id

      :param monitor_alert_version: Unique string identifier for the monitor alert.
      :type monitor_alert_version: str

      :returns: An object describing the monitor alert version.
      :rtype: MonitorAlertVersion



   .. py:method:: wait_for_monitor_alert(timeout=1200)

      A waiting call until the monitor alert version is ready.

      :param timeout: The waiting time given to the call to finish, if it doesn't finish by the allocated time, the call is said to be timed out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the monitor alert version.

      :returns: A string describing the status of a monitor alert version (pending, running, complete, etc.).
      :rtype: str
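
   A short sketch, assuming ``alert_version`` is a :py:class:`MonitorAlertVersion`
   instance:

   .. code-block:: python

      # Block until the alert run finishes, then inspect its outcome.
      alert_version.wait_for_monitor_alert()
      print(alert_version.get_status())
      print(alert_version.alert_result, alert_version.action_status)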



.. py:class:: MonitorDriftAndDistributions(client, featureDrifts=None, featureDistributions=None, nestedDrifts=None, forecastingMonitorSummary={}, embeddingsDistribution={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Summary of important model monitoring statistics for features available in a model monitoring instance

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureDrifts: A list of dicts of eligible feature names and corresponding overall feature drift measures.
   :type featureDrifts: list[dict]
   :param featureDistributions: A list of dicts of feature names and corresponding feature distributions.
   :type featureDistributions: list[dict]
   :param nestedDrifts: A list of dicts of nested feature names and corresponding overall feature drift measures.
   :type nestedDrifts: list[dict]
   :param forecastingMonitorSummary: Summary of important model monitoring statistics for features available in a model monitoring instance
   :type forecastingMonitorSummary: ForecastingMonitorSummary
   :param embeddingsDistribution: Summary of important model monitoring statistics for features available in a model monitoring instance
   :type embeddingsDistribution: EmbeddingFeatureDriftDistribution


   .. py:attribute:: feature_drifts
      :value: None



   .. py:attribute:: feature_distributions
      :value: None



   .. py:attribute:: nested_drifts
      :value: None



   .. py:attribute:: forecasting_monitor_summary


   .. py:attribute:: embeddings_distribution


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict


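   A short sketch of reading the drift summary, assuming ``drift`` is a
   ``MonitorDriftAndDistributions`` instance returned by a model monitor call;
   the exact keys inside each drift dict depend on the monitor configuration:

   .. code-block:: python

      # feature_drifts is a list of dicts, one per eligible feature.
      for feature_drift in drift.feature_drifts or []:
          print(feature_drift)

      # Or work with the plain-dict form of the whole object.
      summary = drift.to_dict()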

.. py:class:: NaturalLanguageExplanation(client, shortExplanation=None, longExplanation=None, isOutdated=None, htmlExplanation=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Natural language explanation of an artifact/object

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param shortExplanation: succinct explanation of the artifact
   :type shortExplanation: str
   :param longExplanation: Longer and verbose explanation of the artifact
   :type longExplanation: str
   :param isOutdated: Flag indicating whether the explanation is outdated due to a change in the underlying artifact
   :type isOutdated: bool
   :param htmlExplanation: HTML formatted explanation of the artifact
   :type htmlExplanation: str


   .. py:attribute:: short_explanation
      :value: None



   .. py:attribute:: long_explanation
      :value: None



   .. py:attribute:: is_outdated
      :value: None



   .. py:attribute:: html_explanation
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: NestedFeature(client, name=None, selectClause=None, featureType=None, featureMapping=None, dataType=None, sourceTable=None, originalName=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A nested feature in a feature group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The unique name of the column
   :type name: str
   :param selectClause: The sql logic for creating this feature's data
   :type selectClause: str
   :param featureType: Feature Type of the Feature
   :type featureType: str
   :param featureMapping: The Feature Mapping of the feature
   :type featureMapping: str
   :param dataType: Data Type of the Feature
   :type dataType: str
   :param sourceTable: The source table of the column
   :type sourceTable: str
   :param originalName: The original name of the column
   :type originalName: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: select_clause
      :value: None



   .. py:attribute:: feature_type
      :value: None



   .. py:attribute:: feature_mapping
      :value: None



   .. py:attribute:: data_type
      :value: None



   .. py:attribute:: source_table
      :value: None



   .. py:attribute:: original_name
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: NestedFeatureSchema(client, name=None, featureType=None, featureMapping=None, dataType=None, detectedFeatureType=None, sourceTable=None, pointInTimeInfo={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A schema description for a nested feature

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The unique name of the column
   :type name: str
   :param featureType: Feature Type of the Feature
   :type featureType: str
   :param featureMapping: The Feature Mapping of the feature
   :type featureMapping: str
   :param dataType: Data Type of the Feature
   :type dataType: str
   :param detectedFeatureType: The detected feature type for this feature
   :type detectedFeatureType: str
   :param sourceTable: The source table of the column
   :type sourceTable: str
   :param pointInTimeInfo: Point in time information for this feature
   :type pointInTimeInfo: PointInTimeFeatureInfo


   .. py:attribute:: name
      :value: None



   .. py:attribute:: feature_type
      :value: None



   .. py:attribute:: feature_mapping
      :value: None



   .. py:attribute:: data_type
      :value: None



   .. py:attribute:: detected_feature_type
      :value: None



   .. py:attribute:: source_table
      :value: None



   .. py:attribute:: point_in_time_info


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: NewsSearchResult(client, title=None, url=None, description=None, thumbnailUrl=None, thumbnailWidth=None, thumbnailHeight=None, faviconUrl=None, datePublished=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A single news search result.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param title: The title of the news.
   :type title: str
   :param url: The URL of the news.
   :type url: str
   :param description: The description of the news.
   :type description: str
   :param thumbnailUrl: The URL of the image of the news.
   :type thumbnailUrl: str
   :param thumbnailWidth: The width of the image of the news.
   :type thumbnailWidth: int
   :param thumbnailHeight: The height of the image of the news.
   :type thumbnailHeight: int
   :param faviconUrl: The URL of the favicon of the news.
   :type faviconUrl: str
   :param datePublished: The date the news was published.
   :type datePublished: str


   .. py:attribute:: title
      :value: None



   .. py:attribute:: url
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: thumbnail_url
      :value: None



   .. py:attribute:: thumbnail_width
      :value: None



   .. py:attribute:: thumbnail_height
      :value: None



   .. py:attribute:: favicon_url
      :value: None



   .. py:attribute:: date_published
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: NlpChatResponse(client, deploymentConversationId=None, messages=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A chat response from an LLM

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param deploymentConversationId: The unique identifier of the deployment conversation.
   :type deploymentConversationId: str
   :param messages: The conversation messages in the chat.
   :type messages: list


   .. py:attribute:: deployment_conversation_id
      :value: None



   .. py:attribute:: messages
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: NullViolation(client, name=None, violation=None, trainingNullFreq=None, predictionNullFreq=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Summary of anomalous null frequencies for a feature discovered by a model monitoring instance

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: Name of feature.
   :type name: str
   :param violation: Description of null violation for a prediction feature.
   :type violation: str
   :param trainingNullFreq: Proportion of null entries in training feature.
   :type trainingNullFreq: float
   :param predictionNullFreq: Proportion of null entries in prediction feature.
   :type predictionNullFreq: float


   .. py:attribute:: name
      :value: None



   .. py:attribute:: violation
      :value: None



   .. py:attribute:: training_null_freq
      :value: None



   .. py:attribute:: prediction_null_freq
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: OrganizationExternalApplicationSettings(client, logo=None, theme=None, managedUserService=None, passwordsDisabled=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The External Application Settings for an Organization.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param logo: The logo.
   :type logo: str
   :param theme: The theme used for External Applications in this org.
   :type theme: dict
   :param managedUserService: The external service that is managing the user accounts.
   :type managedUserService: str
   :param passwordsDisabled: Whether or not passwords are disabled for this organization's domain.
   :type passwordsDisabled: bool


   .. py:attribute:: logo
      :value: None



   .. py:attribute:: theme
      :value: None



   .. py:attribute:: managed_user_service
      :value: None



   .. py:attribute:: passwords_disabled
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: OrganizationGroup(client, organizationGroupId=None, permissions=None, groupName=None, defaultGroup=None, admin=None, createdAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An Organization Group. Defines the permissions available to the users who are members of the group.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param organizationGroupId: The unique identifier of the Organization Group.
   :type organizationGroupId: str
   :param permissions: The list of permissions (VIEW, MODIFY, ADMIN, BILLING, API_KEY, INVITE_USER) the group has.
   :type permissions: list of enum string
   :param groupName: The name of the Organization Group.
   :type groupName: str
   :param defaultGroup: If true, all new users will be added to this group automatically.
   :type defaultGroup: bool
   :param admin: If true, this group contains all permissions available to the organization and cannot be modified or deleted.
   :type admin: bool
   :param createdAt: When the Organization Group was created.
   :type createdAt: str


   .. py:attribute:: organization_group_id
      :value: None



   .. py:attribute:: permissions
      :value: None



   .. py:attribute:: group_name
      :value: None



   .. py:attribute:: default_group
      :value: None



   .. py:attribute:: admin
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: OrganizationGroup



   .. py:method:: describe()

      Returns the specific organization group passed in by the user.

      :param organization_group_id: The unique identifier of the organization group to be described.
      :type organization_group_id: str

      :returns: Information about a specific organization group.
      :rtype: OrganizationGroup



   .. py:method:: add_permission(permission)

      Adds a permission to the specified Organization Group.

      :param permission: Permission to add to the Organization Group.
      :type permission: str



   .. py:method:: remove_permission(permission)

      Removes a permission from the specified Organization Group.

      :param permission: The permission to remove from the Organization Group.
      :type permission: str



   .. py:method:: delete()

      Deletes the specified Organization Group

      :param organization_group_id: Unique string identifier of the organization group.
      :type organization_group_id: str



   .. py:method:: add_user_to(email)

      Adds a user to the specified Organization Group.

      :param email: Email of the user to be added to the group.
      :type email: str



   .. py:method:: remove_user_from(email)

      Removes a user from an Organization Group.

      :param email: Email of the user to remove.
      :type email: str



   .. py:method:: set_default()

      Sets the default Organization Group to which all new users joining an organization are automatically added.

      :param organization_group_id: Unique string identifier of the Organization Group.
      :type organization_group_id: str


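   A minimal management sketch, assuming ``group`` is an ``OrganizationGroup``
   instance fetched elsewhere; the permission and email values are placeholders:

   .. code-block:: python

      group.add_permission('VIEW')           # one of VIEW, MODIFY, ADMIN, BILLING, API_KEY, INVITE_USER
      group.add_user_to('user@example.com')  # placeholder email
      group.set_default()                    # new users will now be added to this group automatically
      group.refresh()
      print(group.permissions)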

.. py:class:: OrganizationSearchResult(client, score=None, featureGroupContext=None, featureGroup={}, featureGroupVersion={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A search result object which contains the retrieved artifact and its relevance score

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param score: The relevance score of the search result.
   :type score: float
   :param featureGroupContext: The rendered context for the feature group that can be used in prompts
   :type featureGroupContext: str
   :param featureGroup: The feature group object retrieved through search.
   :type featureGroup: FeatureGroup
   :param featureGroupVersion: The feature group version object retrieved through search.
   :type featureGroupVersion: FeatureGroupVersion


   .. py:attribute:: score
      :value: None



   .. py:attribute:: feature_group_context
      :value: None



   .. py:attribute:: feature_group


   .. py:attribute:: feature_group_version


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: OrganizationSecret(client, secretKey=None, value=None, createdAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Organization secret

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param secretKey: The key of the secret
   :type secretKey: str
   :param value: The value of the secret
   :type value: str
   :param createdAt: The date and time when the secret was created, in ISO-8601 format.
   :type createdAt: str


   .. py:attribute:: secret_key
      :value: None



   .. py:attribute:: value
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PageData(client, docId=None, page=None, height=None, width=None, pageCount=None, pageText=None, pageTokenStartOffset=None, tokenCount=None, tokens=None, extractedText=None, rotationAngle=None, pageMarkdown=None, embeddedText=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Data extracted from a docstore page.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param docId: Unique Docstore string identifier for the document.
   :type docId: str
   :param page: The page number. Starts from 0.
   :type page: int
   :param height: The height of the page in pixels.
   :type height: int
   :param width: The width of the page in pixels.
   :type width: int
   :param pageCount: The total number of pages in document.
   :type pageCount: int
   :param pageText: The text extracted from the page.
   :type pageText: str
   :param pageTokenStartOffset: The offset of the first token in the page.
   :type pageTokenStartOffset: int
   :param tokenCount: The number of tokens in the page.
   :type tokenCount: int
   :param tokens: The tokens in the page.
   :type tokens: list
   :param extractedText: The extracted text in the page obtained from OCR.
   :type extractedText: str
   :param rotationAngle: The detected rotation angle of the page in degrees. Positive values indicate clockwise and negative values indicate anti-clockwise rotation from the original orientation.
   :type rotationAngle: float
   :param pageMarkdown: The markdown text for the page.
   :type pageMarkdown: str
   :param embeddedText: The embedded text in the page. Only available for digital documents.
   :type embeddedText: str


   .. py:attribute:: doc_id
      :value: None



   .. py:attribute:: page
      :value: None



   .. py:attribute:: height
      :value: None



   .. py:attribute:: width
      :value: None



   .. py:attribute:: page_count
      :value: None



   .. py:attribute:: page_text
      :value: None



   .. py:attribute:: page_token_start_offset
      :value: None



   .. py:attribute:: token_count
      :value: None



   .. py:attribute:: tokens
      :value: None



   .. py:attribute:: extracted_text
      :value: None



   .. py:attribute:: rotation_angle
      :value: None



   .. py:attribute:: page_markdown
      :value: None



   .. py:attribute:: embedded_text
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict


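   A brief sketch of reading a page's content, assuming ``page`` is a
   ``PageData`` instance returned by a document-data call; preferring embedded
   text over OCR output is just one possible choice:

   .. code-block:: python

      # Page numbering starts at 0.
      print(f"page {page.page + 1} of {page.page_count}")

      # Use embedded text when available (digital documents), otherwise OCR text.
      text = page.embedded_text or page.extracted_text or page.page_text
      print(text)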

.. py:class:: Pipeline(client, pipelineName=None, pipelineId=None, createdAt=None, notebookId=None, cron=None, nextRunTime=None, isProd=None, warning=None, createdBy=None, steps={}, pipelineReferences={}, latestPipelineVersion={}, codeSource={}, pipelineVariableMappings={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A pipeline of steps.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param pipelineName: The name of the pipeline.
   :type pipelineName: str
   :param pipelineId: The unique identifier of the pipeline.
   :type pipelineId: str
   :param createdAt: The date and time at which the pipeline was created.
   :type createdAt: str
   :param notebookId: The reference to the notebook this pipeline belongs to.
   :type notebookId: str
   :param cron: A cron-style string that describes when this refresh policy is to be executed in UTC
   :type cron: str
   :param nextRunTime: The next time this pipeline will be run.
   :type nextRunTime: str
   :param isProd: Whether this pipeline is a production pipeline.
   :type isProd: bool
   :param warning: Warning message for possible errors that might occur if the pipeline is run.
   :type warning: str
   :param createdBy: The email of the user who created the pipeline
   :type createdBy: str
   :param steps: A list of the pipeline steps attached to the pipeline.
   :type steps: PipelineStep
   :param pipelineReferences: A list of references from the pipeline to other objects
   :type pipelineReferences: PipelineReference
   :param latestPipelineVersion: The latest version of the pipeline.
   :type latestPipelineVersion: PipelineVersion
   :param codeSource: information on the source code
   :type codeSource: CodeSource
   :param pipelineVariableMappings: A description of the function variables passed into the pipeline.
   :type pipelineVariableMappings: PythonFunctionArgument


   .. py:attribute:: pipeline_name
      :value: None



   .. py:attribute:: pipeline_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: cron
      :value: None



   .. py:attribute:: next_run_time
      :value: None



   .. py:attribute:: is_prod
      :value: None



   .. py:attribute:: warning
      :value: None



   .. py:attribute:: created_by
      :value: None



   .. py:attribute:: steps


   .. py:attribute:: pipeline_references


   .. py:attribute:: latest_pipeline_version


   .. py:attribute:: code_source


   .. py:attribute:: pipeline_variable_mappings


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Pipeline



   .. py:method:: describe()

      Describes a given pipeline.

      :param pipeline_id: The ID of the pipeline to describe.
      :type pipeline_id: str

      :returns: An object describing a Pipeline
      :rtype: Pipeline



   .. py:method:: update(project_id = None, pipeline_variable_mappings = None, cron = None, is_prod = None)

      Updates a pipeline for executing multiple steps.

      :param project_id: A unique string identifier for the project.
      :type project_id: str
      :param pipeline_variable_mappings: List of Python function arguments for the pipeline.
      :type pipeline_variable_mappings: List
      :param cron: A cron-like string specifying the frequency of the scheduled pipeline runs.
      :type cron: str
      :param is_prod: Whether the pipeline is a production pipeline or not.
      :type is_prod: bool

      :returns: An object that describes a Pipeline.
      :rtype: Pipeline



   .. py:method:: rename(pipeline_name)

      Renames a pipeline.

      :param pipeline_name: The new name of the pipeline.
      :type pipeline_name: str

      :returns: An object that describes a Pipeline.
      :rtype: Pipeline



   .. py:method:: delete()

      Deletes a pipeline.

      :param pipeline_id: The ID of the pipeline to delete.
      :type pipeline_id: str



   .. py:method:: list_versions(limit = 200)

      Lists the pipeline versions for a specified pipeline

      :param limit: The maximum number of pipeline versions to return.
      :type limit: int

      :returns: A list of pipeline versions.
      :rtype: list[PipelineVersion]



   .. py:method:: run(pipeline_variable_mappings = None)

      Runs a specified pipeline with the arguments provided.

      :param pipeline_variable_mappings: List of Python function arguments for the pipeline.
      :type pipeline_variable_mappings: List

      :returns: The object describing the pipeline
      :rtype: PipelineVersion



   .. py:method:: create_step(step_name, function_name = None, source_code = None, step_input_mappings = None, output_variable_mappings = None, step_dependencies = None, package_requirements = None, cpu_size = None, memory = None, timeout = None)

      Creates a step in a given pipeline.

      :param step_name: The name of the step.
      :type step_name: str
      :param function_name: The name of the Python function.
      :type function_name: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param step_input_mappings: List of Python function arguments.
      :type step_input_mappings: List
      :param output_variable_mappings: List of Python function outputs.
      :type output_variable_mappings: List
      :param step_dependencies: List of step names this step depends on.
      :type step_dependencies: list
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param cpu_size: Size of the CPU for the step function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the step function.
      :type memory: int
      :param timeout: Timeout for the step in minutes, default is 300 minutes.
      :type timeout: int

      :returns: Object describing the pipeline.
      :rtype: Pipeline



   .. py:method:: describe_step_by_name(step_name)

      Describes a pipeline step by the step name.

      :param step_name: The name of the step.
      :type step_name: str

      :returns: An object describing the pipeline step.
      :rtype: PipelineStep



   .. py:method:: unset_refresh_schedule()

      Deletes the refresh schedule for a given pipeline.

      :param pipeline_id: The id of the pipeline.
      :type pipeline_id: str

      :returns: Object describing the pipeline.
      :rtype: Pipeline



   .. py:method:: pause_refresh_schedule()

      Pauses the refresh schedule for a given pipeline.

      :param pipeline_id: The id of the pipeline.
      :type pipeline_id: str

      :returns: Object describing the pipeline.
      :rtype: Pipeline



   .. py:method:: resume_refresh_schedule()

      Resumes the refresh schedule for a given pipeline.

      :param pipeline_id: The id of the pipeline.
      :type pipeline_id: str

      :returns: Object describing the pipeline.
      :rtype: Pipeline



   .. py:method:: create_step_from_function(step_name, function, step_input_mappings = None, output_variable_mappings = None, step_dependencies = None, package_requirements = None, cpu_size = None, memory = None)

      Creates a step in the pipeline from a python function.

      :param step_name: The name of the step.
      :type step_name: str
      :param function: The python function.
      :type function: callable
      :param step_input_mappings: List of Python function arguments.
      :type step_input_mappings: List[PythonFunctionArguments]
      :param output_variable_mappings: List of Python function outputs.
      :type output_variable_mappings: List[OutputVariableMapping]
      :param step_dependencies: List of step names this step depends on.
      :type step_dependencies: List[str]
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param cpu_size: Size of the CPU for the step function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the step function.
      :type memory: int



   .. py:method:: wait_for_pipeline(timeout=1200)

      A waiting call until all the stages of the latest pipeline version are completed.

      :param timeout: The time allotted for the call to finish; if it does not finish within the allotted time, the call times out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the latest pipeline version.

      :returns: A string describing the status of a pipeline version (pending, running, complete, etc.).
      :rtype: str


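   A condensed sketch of adding a step and running the pipeline, assuming
   ``pipeline`` is a ``Pipeline`` instance created elsewhere; the step name,
   function name and source code below are placeholders:

   .. code-block:: python

      pipeline.create_step(
          step_name='ingest',                              # placeholder step name
          function_name='ingest_fn',                       # placeholder function name
          source_code='def ingest_fn():\n    return 1\n',  # placeholder source code
          memory=16,
      )
      version = pipeline.run()  # returns a PipelineVersion
      pipeline.wait_for_pipeline(timeout=1200)
      print(pipeline.get_status())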

.. py:class:: PipelineReference(client, pipelineReferenceId=None, pipelineId=None, objectType=None, datasetId=None, modelId=None, deploymentId=None, batchPredictionDescriptionId=None, modelMonitorId=None, notebookId=None, featureGroupId=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A reference from a pipeline to the objects it is run on.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param pipelineReferenceId: The id of the reference.
   :type pipelineReferenceId: str
   :param pipelineId: The id of the pipeline for the reference.
   :type pipelineId: str
   :param objectType: The object type of the reference.
   :type objectType: str
   :param datasetId: The dataset id of the reference.
   :type datasetId: str
   :param modelId: The model id of the reference.
   :type modelId: str
   :param deploymentId: The deployment id of the reference.
   :type deploymentId: str
   :param batchPredictionDescriptionId: The batch prediction description id of the reference.
   :type batchPredictionDescriptionId: str
   :param modelMonitorId: The model monitor id of the reference.
   :type modelMonitorId: str
   :param notebookId: The notebook id of the reference.
   :type notebookId: str
   :param featureGroupId: The feature group id of the reference.
   :type featureGroupId: str


   .. py:attribute:: pipeline_reference_id
      :value: None



   .. py:attribute:: pipeline_id
      :value: None



   .. py:attribute:: object_type
      :value: None



   .. py:attribute:: dataset_id
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: batch_prediction_description_id
      :value: None



   .. py:attribute:: model_monitor_id
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PipelineStep(client, pipelineStepId=None, pipelineId=None, stepName=None, pipelineName=None, createdAt=None, updatedAt=None, pythonFunctionId=None, stepDependencies=None, cpuSize=None, memory=None, timeout=None, pythonFunction={}, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A step in a pipeline.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param pipelineStepId: The reference to this step.
   :type pipelineStepId: str
   :param pipelineId: The reference to the pipeline this step belongs to.
   :type pipelineId: str
   :param stepName: The name of the step.
   :type stepName: str
   :param pipelineName: The name of the pipeline this step is a part of.
   :type pipelineName: str
   :param createdAt: The date and time at which this step was created.
   :type createdAt: str
   :param updatedAt: The date and time when this step was last updated.
   :type updatedAt: str
   :param pythonFunctionId: The python function_id.
   :type pythonFunctionId: str
   :param stepDependencies: List of steps this step depends on.
   :type stepDependencies: list[str]
   :param cpuSize: CPU size specified for the step function.
   :type cpuSize: str
   :param memory: Memory in GB specified for the step function.
   :type memory: int
   :param timeout: Timeout for the step in minutes, default is 300 minutes.
   :type timeout: int
   :param pythonFunction: Information about the python function for the step.
   :type pythonFunction: PythonFunction
   :param codeSource: Information about the source code of the step function.
   :type codeSource: CodeSource


   .. py:attribute:: pipeline_step_id
      :value: None



   .. py:attribute:: pipeline_id
      :value: None



   .. py:attribute:: step_name
      :value: None



   .. py:attribute:: pipeline_name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: python_function_id
      :value: None



   .. py:attribute:: step_dependencies
      :value: None



   .. py:attribute:: cpu_size
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: timeout
      :value: None



   .. py:attribute:: python_function


   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: delete()

      Deletes a step from a pipeline.

      :param pipeline_step_id: The ID of the pipeline step.
      :type pipeline_step_id: str



   .. py:method:: update(function_name = None, source_code = None, step_input_mappings = None, output_variable_mappings = None, step_dependencies = None, package_requirements = None, cpu_size = None, memory = None, timeout = None)

      Updates a step in a given pipeline.

      :param function_name: The name of the Python function.
      :type function_name: str
      :param source_code: Contents of a valid Python source code file. The source code should contain the transform feature group functions. A list of allowed imports and system libraries for each language is specified in the user functions documentation section.
      :type source_code: str
      :param step_input_mappings: List of Python function arguments.
      :type step_input_mappings: List
      :param output_variable_mappings: List of Python function outputs.
      :type output_variable_mappings: List
      :param step_dependencies: List of step names this step depends on.
      :type step_dependencies: list
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param cpu_size: Size of the CPU for the step function.
      :type cpu_size: str
      :param memory: Memory (in GB) for the step function.
      :type memory: int
      :param timeout: Timeout for the pipeline step, default is 300 minutes.
      :type timeout: int

      :returns: Object describing the pipeline.
      :rtype: PipelineStep



   .. py:method:: rename(step_name)

      Renames a step in a given pipeline.

      :param step_name: The name of the step.
      :type step_name: str

      :returns: Object describing the pipeline.
      :rtype: PipelineStep



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: PipelineStep



   .. py:method:: describe()

      Describes a pipeline step.

      :param pipeline_step_id: The ID of the pipeline step.
      :type pipeline_step_id: str

      :returns: An object describing the pipeline step.
      :rtype: PipelineStep


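   A small sketch of editing an existing step, assuming ``step`` is a
   ``PipelineStep`` instance; the values below are placeholders:

   .. code-block:: python

      step.update(memory=32, timeout=120)  # raise the step's memory and timeout
      step.rename('ingest_v2')             # placeholder new step name
      step.refresh()
      print(step.memory, step.timeout)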

.. py:class:: PipelineStepVersion(client, stepName=None, pipelineStepVersion=None, pipelineStepId=None, pipelineId=None, pipelineVersion=None, createdAt=None, updatedAt=None, status=None, error=None, outputErrors=None, pythonFunctionId=None, functionVariableMappings=None, stepDependencies=None, outputVariableMappings=None, cpuSize=None, memory=None, timeout=None, pipelineStepVersionReferences={}, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of a pipeline step.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param stepName: The name of the step.
   :type stepName: str
   :param pipelineStepVersion: The reference to the pipeline step version.
   :type pipelineStepVersion: str
   :param pipelineStepId: The reference to this step.
   :type pipelineStepId: str
   :param pipelineId: The reference to the pipeline this step belongs to.
   :type pipelineId: str
   :param pipelineVersion: The reference to the pipeline version.
   :type pipelineVersion: str
   :param createdAt: The date and time at which this step was created.
   :type createdAt: str
   :param updatedAt: The date and time when this step was last updated.
   :type updatedAt: str
   :param status: The status of the pipeline version.
   :type status: str
   :param error: The error message if the pipeline step failed.
   :type error: str
   :param outputErrors: The error message of a pipeline step's output.
   :type outputErrors: str
   :param pythonFunctionId: The reference to the python function
   :type pythonFunctionId: str
   :param functionVariableMappings: The mappings for function parameters' names.
   :type functionVariableMappings: dict
   :param stepDependencies: List of steps this step depends on.
   :type stepDependencies: list[str]
   :param outputVariableMappings: The mappings for the output variables to the step.
   :type outputVariableMappings: dict
   :param cpuSize: CPU size specified for the step function.
   :type cpuSize: str
   :param memory: Memory in GB specified for the step function.
   :type memory: int
   :param timeout: The timeout in minutes for the pipeline step.
   :type timeout: int
   :param pipelineStepVersionReferences: A list of references to the output instances of the pipeline step version.
   :type pipelineStepVersionReferences: PipelineStepVersionReference
   :param codeSource: Information about the source code of the pipeline step version.
   :type codeSource: CodeSource


   .. py:attribute:: step_name
      :value: None



   .. py:attribute:: pipeline_step_version
      :value: None



   .. py:attribute:: pipeline_step_id
      :value: None



   .. py:attribute:: pipeline_id
      :value: None



   .. py:attribute:: pipeline_version
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: output_errors
      :value: None



   .. py:attribute:: python_function_id
      :value: None



   .. py:attribute:: function_variable_mappings
      :value: None



   .. py:attribute:: step_dependencies
      :value: None



   .. py:attribute:: output_variable_mappings
      :value: None



   .. py:attribute:: cpu_size
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: timeout
      :value: None



   .. py:attribute:: pipeline_step_version_references


   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: PipelineStepVersion



   .. py:method:: describe()

      Describes a pipeline step version.

      :param pipeline_step_version: The ID of the pipeline step version.
      :type pipeline_step_version: str

      :returns: An object describing the pipeline step version.
      :rtype: PipelineStepVersion



   .. py:method:: get_step_version_logs()

      Gets the logs for a given step version.

      :param pipeline_step_version: The id of the pipeline step version.
      :type pipeline_step_version: str

      :returns: Object describing the pipeline step logs.
      :rtype: PipelineStepVersionLogs


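   A minimal sketch of inspecting a failed step run, assuming ``step_version``
   is a ``PipelineStepVersion`` instance:

   .. code-block:: python

      step_version.refresh()
      if step_version.status == 'FAILED':  # illustrative status value
          print(step_version.error)
          # Pull the combined stdout/stderr logs for this step run.
          print(step_version.get_step_version_logs().logs)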

.. py:class:: PipelineStepVersionLogs(client, stepName=None, pipelineStepId=None, pipelineStepVersion=None, logs=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs for a given pipeline step version.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param stepName: The name of the step
   :type stepName: str
   :param pipelineStepId: The ID of the step
   :type pipelineStepId: str
   :param pipelineStepVersion: The version of the step
   :type pipelineStepVersion: str
   :param logs: The logs for both stdout and stderr of the step
   :type logs: str


   .. py:attribute:: step_name
      :value: None



   .. py:attribute:: pipeline_step_id
      :value: None



   .. py:attribute:: pipeline_step_version
      :value: None



   .. py:attribute:: logs
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PipelineStepVersionReference(client, pipelineStepVersionReferenceId=None, pipelineStepVersion=None, objectType=None, datasetVersion=None, modelVersion=None, deploymentVersion=None, batchPredictionId=None, modelMonitorVersion=None, notebookVersion=None, featureGroupVersion=None, status=None, error=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A reference from a pipeline step version to the versions that were output from the pipeline step.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param pipelineStepVersionReferenceId: The id of the reference.
   :type pipelineStepVersionReferenceId: str
   :param pipelineStepVersion: The pipeline step version the reference is connected to.
   :type pipelineStepVersion: str
   :param objectType: The object type of the reference.
   :type objectType: str
   :param datasetVersion: The dataset version the reference is connected to.
   :type datasetVersion: str
   :param modelVersion: The model version the reference is connected to.
   :type modelVersion: str
   :param deploymentVersion: The deployment version the reference is connected to.
   :type deploymentVersion: str
   :param batchPredictionId: The batch prediction id the reference is connected to.
   :type batchPredictionId: str
   :param modelMonitorVersion: The model monitor version the reference is connected to.
   :type modelMonitorVersion: str
   :param notebookVersion: The notebook version the reference is connected to.
   :type notebookVersion: str
   :param featureGroupVersion: The feature group version the reference is connected to.
   :type featureGroupVersion: str
   :param status: The status of the reference
   :type status: str
   :param error: The error message if the reference is in an error state.
   :type error: str


   .. py:attribute:: pipeline_step_version_reference_id
      :value: None



   .. py:attribute:: pipeline_step_version
      :value: None



   .. py:attribute:: object_type
      :value: None



   .. py:attribute:: dataset_version
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: deployment_version
      :value: None



   .. py:attribute:: batch_prediction_id
      :value: None



   .. py:attribute:: model_monitor_version
      :value: None



   .. py:attribute:: notebook_version
      :value: None



   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PipelineVersion(client, pipelineName=None, pipelineId=None, pipelineVersion=None, createdAt=None, updatedAt=None, completedAt=None, status=None, error=None, stepVersions={}, codeSource={}, pipelineVariableMappings={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of a pipeline.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param pipelineName: The name of the pipeline this version belongs to.
   :type pipelineName: str
   :param pipelineId: The unique identifier of the pipeline this version belongs to.
   :type pipelineId: str
   :param pipelineVersion: The reference to this pipeline version.
   :type pipelineVersion: str
   :param createdAt: The date and time at which this pipeline version was created.
   :type createdAt: str
   :param updatedAt: The date and time at which this pipeline version was updated.
   :type updatedAt: str
   :param completedAt: The date and time at which this pipeline version was completed.
   :type completedAt: str
   :param status: The status of the pipeline version.
   :type status: str
   :param error: The relevant error, if the status is FAILED.
   :type error: str
   :param stepVersions: A list of the pipeline step versions.
   :type stepVersions: PipelineStepVersion
   :param codeSource: information on the source code
   :type codeSource: CodeSource
   :param pipelineVariableMappings: A description of the function variables passed into the pipeline.
   :type pipelineVariableMappings: PythonFunctionArgument


   .. py:attribute:: pipeline_name
      :value: None



   .. py:attribute:: pipeline_id
      :value: None



   .. py:attribute:: pipeline_version
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: completed_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: step_versions


   .. py:attribute:: code_source


   .. py:attribute:: pipeline_variable_mappings


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: PipelineVersion



   .. py:method:: describe()

      Describes a specified pipeline version

      :param pipeline_version: Unique string identifier for the pipeline version
      :type pipeline_version: str

      :returns: Object describing the pipeline version
      :rtype: PipelineVersion



   .. py:method:: reset(steps = None, include_downstream_steps = True)

      Reruns a pipeline version for the given steps and downstream steps if specified.

      :param steps: List of pipeline step names to rerun.
      :type steps: list
      :param include_downstream_steps: Whether to rerun downstream steps from the steps you have passed
      :type include_downstream_steps: bool

      :returns: Object describing the pipeline version
      :rtype: PipelineVersion



   .. py:method:: list_logs()

      Gets the logs for the steps in a given pipeline version.

      :param pipeline_version: The id of the pipeline version.
      :type pipeline_version: str

      :returns: Object describing the logs for the steps in the pipeline.
      :rtype: PipelineVersionLogs



   .. py:method:: skip_pending_steps()

      Skips pending steps in a pipeline version.

      :param pipeline_version: The id of the pipeline version.
      :type pipeline_version: str

      :returns: Object describing the pipeline version
      :rtype: PipelineVersion



   .. py:method:: wait_for_pipeline(timeout=1200)

      A waiting call until all the stages in a pipeline version have completed.

      :param timeout: The time allotted for the call to finish; if it does not finish within the allotted time, the call times out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the pipeline version.

      :returns: A string describing the status of a pipeline version (pending, running, complete, etc.).
      :rtype: str


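   A short sketch of rerunning and monitoring a pipeline version, assuming
   ``pipeline_version`` is a ``PipelineVersion`` instance; the step name is a
   placeholder:

   .. code-block:: python

      # Rerun one step and everything downstream of it.
      pipeline_version.reset(steps=['ingest'], include_downstream_steps=True)
      pipeline_version.wait_for_pipeline(timeout=1200)

      if pipeline_version.get_status() == 'FAILED':  # illustrative status value
          for step_logs in pipeline_version.list_logs().step_logs:
              print(step_logs.step_name, step_logs.logs)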

.. py:class:: PipelineVersionLogs(client, stepLogs={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Logs for a given pipeline version.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param stepLogs: A list of the pipeline step version logs.
   :type stepLogs: PipelineStepVersionLogs


   .. py:attribute:: step_logs


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PlaygroundText(client, playgroundText=None, renderingCode=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The text content inside of a playground segment.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param playgroundText: The text of the playground segment.
   :type playgroundText: str
   :param renderingCode: The rendering code of the playground segment.
   :type renderingCode: str


   .. py:attribute:: playground_text
      :value: None



   .. py:attribute:: rendering_code
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PointInTimeFeature(client, historyTableName=None, aggregationKeys=None, timestampKey=None, historicalTimestampKey=None, lookbackWindowSeconds=None, lookbackWindowLagSeconds=None, lookbackCount=None, lookbackUntilPosition=None, expression=None, groupName=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A point-in-time feature description

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param historyTableName: The name of the history table. If not specified, the current table is used for a self-join.
   :type historyTableName: str
   :param aggregationKeys: List of keys to use for joining the historical table and performing the window aggregation.
   :type aggregationKeys: list[str]
   :param timestampKey: Name of feature which contains the timestamp value for the point-in-time feature.
   :type timestampKey: str
   :param historicalTimestampKey: Name of feature which contains the historical timestamp.
   :type historicalTimestampKey: str
   :param lookbackWindowSeconds: If window is specified in terms of time, the number of seconds in the past from the current time for the start of the window.
   :type lookbackWindowSeconds: float
   :param lookbackWindowLagSeconds: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed. If it is negative, we are looking at the "future" rows in the history table.
   :type lookbackWindowLagSeconds: float
   :param lookbackCount: If window is specified in terms of count, the start position of the window (0 is the current row).
   :type lookbackCount: int
   :param lookbackUntilPosition: Optional lag to offset the closest point for the window. If it is positive, the start of the window is delayed by that many rows. If it is negative, we are looking at those many "future" rows in the history table.
   :type lookbackUntilPosition: int
   :param expression: SQL aggregate expression which can convert a sequence of rows into a scalar value.
   :type expression: str
   :param groupName: The group name this point-in-time feature belongs to.
   :type groupName: str


   .. py:attribute:: history_table_name
      :value: None



   .. py:attribute:: aggregation_keys
      :value: None



   .. py:attribute:: timestamp_key
      :value: None



   .. py:attribute:: historical_timestamp_key
      :value: None



   .. py:attribute:: lookback_window_seconds
      :value: None



   .. py:attribute:: lookback_window_lag_seconds
      :value: None



   .. py:attribute:: lookback_count
      :value: None



   .. py:attribute:: lookback_until_position
      :value: None



   .. py:attribute:: expression
      :value: None



   .. py:attribute:: group_name
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict


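   The lookback fields above describe either a time-based window
   (``lookbackWindowSeconds`` / ``lookbackWindowLagSeconds``) or a count-based
   window (``lookbackCount`` / ``lookbackUntilPosition``). The standalone sketch
   below illustrates one common reading of the time-based case; it is an
   illustration of the documented semantics, not the service's implementation:

   .. code-block:: python

      def time_window_bounds(current_ts: float,
                             lookback_window_seconds: float,
                             lookback_window_lag_seconds: float = 0.0):
          """Illustrative [start, end) bounds of a time-based point-in-time window."""
          start = current_ts - lookback_window_seconds    # how far back the window reaches
          end = current_ts - lookback_window_lag_seconds  # positive lag pushes the window away from "now"
          return start, end

      # Example: a window reaching 1 hour back and ending 5 minutes before the row's timestamp.
      print(time_window_bounds(1_700_000_000.0, 3600.0, 300.0))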

.. py:class:: PointInTimeFeatureInfo(client, expression=None, groupName=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Point-in-time information for a feature

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param expression: SQL aggregate expression which can convert a sequence of rows into a scalar value.
   :type expression: str
   :param groupName: The group name this point-in-time feature belongs to.
   :type groupName: str


   .. py:attribute:: expression
      :value: None



   .. py:attribute:: group_name
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PointInTimeGroup(client, groupName=None, windowKey=None, aggregationKeys=None, lookbackWindow=None, lookbackWindowLag=None, lookbackCount=None, lookbackUntilPosition=None, historyTableName=None, historyWindowKey=None, historyAggregationKeys=None, features={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A point in time group containing point in time features

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param groupName: The name of the point in time group
   :type groupName: str
   :param windowKey: Name of feature which contains the timestamp value for the point in time feature
   :type windowKey: str
   :param aggregationKeys: List of keys to use for joining the historical table and performing the window aggregation.
   :type aggregationKeys: list
   :param lookbackWindow: Number of seconds in the past from the current time for start of the window.
   :type lookbackWindow: float
   :param lookbackWindowLag: Optional lag to offset the closest point for the window. If it is positive, we delay the start of the window. If it is negative, we are looking at the "future" rows in the history table.
   :type lookbackWindowLag: float
   :param lookbackCount: If window is specified in terms of count, the start position of the window (0 is the current row)
   :type lookbackCount: int
   :param lookbackUntilPosition: Optional lag to offset the closest point for the window. If it is positive, we delay the start of the window by that many rows. If it is negative, we are looking at those many "future" rows in the history table.
   :type lookbackUntilPosition: int
   :param historyTableName: The table to use for aggregating; if not provided, the source table will be used.
   :type historyTableName: str
   :param historyWindowKey: Name of feature to use for ordering the rows on the history table. If not provided, the windowKey from the source table will be used
   :type historyWindowKey: str
   :param historyAggregationKeys: List of keys to use for joining the historical table and performing the window aggregation. If not provided, the aggregationKeys from the source table will be used. Must be the same length and order as the source table's aggregationKeys.
   :type historyAggregationKeys: list
   :param features: List of features in the Point in Time group
   :type features: PointInTimeGroupFeature


   .. py:attribute:: group_name
      :value: None



   .. py:attribute:: window_key
      :value: None



   .. py:attribute:: aggregation_keys
      :value: None



   .. py:attribute:: lookback_window
      :value: None



   .. py:attribute:: lookback_window_lag
      :value: None



   .. py:attribute:: lookback_count
      :value: None



   .. py:attribute:: lookback_until_position
      :value: None



   .. py:attribute:: history_table_name
      :value: None



   .. py:attribute:: history_window_key
      :value: None



   .. py:attribute:: history_aggregation_keys
      :value: None



   .. py:attribute:: features


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PointInTimeGroupFeature(client, name=None, expression=None, pitOperationType=None, pitOperationConfig=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A point in time group feature

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the feature
   :type name: str
   :param expression: SQL Aggregate expression which can convert a sequence of rows into a scalar value.
   :type expression: str
   :param pitOperationType: The operation used in point in time feature generation
   :type pitOperationType: str
   :param pitOperationConfig: The configuration used as input to the operation type
   :type pitOperationConfig: dict


   .. py:attribute:: name
      :value: None



   .. py:attribute:: expression
      :value: None



   .. py:attribute:: pit_operation_type
      :value: None



   .. py:attribute:: pit_operation_config
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PredictionClient(client_options = None)

   Bases: :py:obj:`abacusai.client.BaseApiClient`


   Abacus.AI Prediction API Client. Does not utilize authentication and only contains public prediction methods

   :param client_options: Optional API client configurations
   :type client_options: ClientOptions
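
   A minimal usage sketch is shown below; nothing beyond instantiating the client is required up front, since each prediction call is authorized with a deployment token rather than an API key:

   .. code-block:: python

      from abacusai import PredictionClient

      # The prediction client does not use an API key; each prediction call is
      # instead authorized with a deployment token scoped to a single project.
      client = PredictionClient()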


   .. py:method:: predict_raw(deployment_token, deployment_id, **kwargs)

      Raw interface for returning predictions from Plug and Play deployments.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param \*\*kwargs: Arbitrary key/value pairs may be passed in and are sent as part of the request body.
      :type \*\*kwargs: dict
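
      A minimal sketch of calling this method; the token, deployment ID, and the extra keyword argument are placeholders (the keyword name is hypothetical and depends on what your Plug and Play deployment expects):

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # Any extra keyword arguments are sent verbatim in the request body.
         result = client.predict_raw(
             deployment_token='your_deployment_token',   # placeholder
             deployment_id='your_deployment_id',         # placeholder
             input_text='example payload',               # hypothetical key/value pair
         )
         print(result)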



   .. py:method:: lookup_features(deployment_token, deployment_id, query_data, limit_results = None, result_columns = None)

      Returns the feature group deployed in the feature store project.

      :param deployment_token: A deployment token used to authenticate access to created deployments. This token only authorizes predictions on deployments in this project, so it can be safely embedded inside an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the key is the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the value is the unique value of the same entity.
      :type query_data: dict
      :param limit_results: If provided, will limit the number of results to the value specified.
      :type limit_results: int
      :param result_columns: If provided, will limit the columns present in each result to the columns specified in this list.
      :type result_columns: list
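
      A minimal usage sketch; the token, deployment ID, and column names ('user_id', 'age', 'country') are placeholders for whatever columns exist in your feature store project:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # 'user_id' stands in for the column mapped to USER_ID in the project.
         features = client.lookup_features(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             query_data={'user_id': 'U12345'},
             limit_results=10,
             result_columns=['user_id', 'age', 'country'],
         )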



   .. py:method:: predict(deployment_token, deployment_id, query_data, **kwargs)

      Returns a prediction for Predictive Modeling

      :param deployment_token: A deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, and is safe to embed in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the key is the column name (e.g. a column with name 'user_id' in the dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed, and the value is the unique value of the same entity.
      :type query_data: dict
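
      A minimal usage sketch; the token, deployment ID, and the 'user_id' column are placeholders:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # 'user_id' stands in for whichever column in your dataset is mapped
         # to the USER_ID column mapping.
         prediction = client.predict(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             query_data={'user_id': 'U12345'},
         )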



   .. py:method:: predict_multiple(deployment_token, deployment_id, query_data)

      Returns a list of predictions for predictive modeling.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, and is safe to embed in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A list of dictionaries, where the 'key' is the column name (e.g. a column with name 'user_id' in the dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed, and the 'value' is the unique value of the same entity.
      :type query_data: list



   .. py:method:: predict_from_datasets(deployment_token, deployment_id, query_data)

      Returns a list of predictions for Predictive Modeling.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the 'key' is the source dataset name, and the 'value' is a list of records corresponding to the dataset rows.
      :type query_data: dict



   .. py:method:: predict_lead(deployment_token, deployment_id, query_data, explain_predictions = False, explainer_type = None)

      Returns the probability of a user being a lead based on their interaction with the service/product and their own attributes (e.g. income, assets, credit score, etc.). Note that the inputs to this method, wherever applicable, should be the column names in the dataset mapped to the column mappings in our system (e.g. column 'user_id' mapped to mapping 'LEAD_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary containing user attributes and/or user's interaction data with the product/service (e.g. number of clicks, items in cart, etc.).
      :type query_data: dict
      :param explain_predictions: Will explain predictions for leads
      :type explain_predictions: bool
      :param explainer_type: Type of explainer to use for explanations
      :type explainer_type: str



   .. py:method:: predict_churn(deployment_token, deployment_id, query_data, explain_predictions = False, explainer_type = None)

      Returns the probability that a user will churn based on their interactions with the item/product/service. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'churn_result' mapped to mapping 'CHURNED_YN' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where the 'key' will be the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the 'value' will be the unique value of the same entity.
      :type query_data: dict
      :param explain_predictions: Will explain predictions for churn
      :type explain_predictions: bool
      :param explainer_type: Type of explainer to use for explanations
      :type explainer_type: str



   .. py:method:: predict_takeover(deployment_token, deployment_id, query_data)

      Returns a probability for each class label associated with the types of fraud or a 'yes' or 'no' type label for the possibility of fraud. Note that the inputs to this method, wherever applicable, will be the column names in the dataset mapped to the column mappings in our system (e.g., column 'account_name' mapped to mapping 'ACCOUNT_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary containing account activity characteristics (e.g., login id, login duration, login type, IP address, etc.).
      :type query_data: dict



   .. py:method:: predict_fraud(deployment_token, deployment_id, query_data)

      Returns the probability of a transaction performed under a specific account being fraudulent or not. Note that the inputs to this method, wherever applicable, should be the column names in your dataset mapped to the column mappings in our system (e.g. column 'account_number' mapped to the mapping 'ACCOUNT_ID' in our system).

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary containing transaction attributes (e.g. credit card type, transaction location, transaction amount, etc.).
      :type query_data: dict



   .. py:method:: predict_class(deployment_token, deployment_id, query_data, threshold = None, threshold_class = None, thresholds = None, explain_predictions = False, fixed_features = None, nested = None, explainer_type = None)

      Returns a classification prediction

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the 'Key' is the column name (e.g. a column with the name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the 'Value' is the unique value of the same entity.
      :type query_data: dict
      :param threshold: A float value that is applied on the popular class label.
      :type threshold: float
      :param threshold_class: The label upon which the threshold is added (binary labels only).
      :type threshold_class: str
      :param thresholds: Maps labels to thresholds (multi-label classification only). Defaults to F1 optimal threshold if computed for the given class, else uses 0.5.
      :type thresholds: Dict
      :param explain_predictions: If True, returns the SHAP explanations for all input features.
      :type explain_predictions: bool
      :param fixed_features: A set of input features to treat as constant for explanations - only honored when the explainer type is KERNEL_EXPLAINER
      :type fixed_features: list
      :param nested: If specified generates prediction delta for each index of the specified nested feature.
      :type nested: str
      :param explainer_type: The type of explainer to use.
      :type explainer_type: str
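
      A minimal usage sketch; the token, deployment ID, column name, and label are placeholders, and the threshold/explainer values are illustrative only:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # The threshold is applied to the label named in threshold_class
         # (binary classification only); SHAP explanations are requested.
         result = client.predict_class(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             query_data={'user_id': 'U12345'},
             threshold=0.7,
             threshold_class='positive_label',
             explain_predictions=True,
             explainer_type='KERNEL_EXPLAINER',
         )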



   .. py:method:: predict_target(deployment_token, deployment_id, query_data, explain_predictions = False, fixed_features = None, nested = None, explainer_type = None)

      Returns a prediction from a classification or regression model. Optionally, includes explanations.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the 'key' is the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the entity against which a prediction is performed and the 'value' is the unique value of the same entity.
      :type query_data: dict
      :param explain_predictions: If true, returns the SHAP explanations for all input features.
      :type explain_predictions: bool
      :param fixed_features: Set of input features to treat as constant for explanations - only honored when the explainer type is KERNEL_EXPLAINER
      :type fixed_features: list
      :param nested: If specified, generates prediction delta for each index of the specified nested feature.
      :type nested: str
      :param explainer_type: The type of explainer to use.
      :type explainer_type: str



   .. py:method:: get_anomalies(deployment_token, deployment_id, threshold = None, histogram = False)

      Returns a list of anomalies from the training dataset.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param threshold: The threshold score of what is an anomaly. Valid values are between 0.8 and 0.99.
      :type threshold: float
      :param histogram: If True, will return a histogram of the distribution of all points.
      :type histogram: bool



   .. py:method:: get_timeseries_anomalies(deployment_token, deployment_id, start_timestamp = None, end_timestamp = None, query_data = None, get_all_item_data = False, series_ids = None)

      Returns a list of anomalous timestamps from the training dataset.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param start_timestamp: Timestamp from which anomalies are to be detected in the training data.
      :type start_timestamp: str
      :param end_timestamp: Timestamp up to which anomalies are to be detected in the training data.
      :type end_timestamp: str
      :param query_data: Additional data on which anomaly detection is to be performed; it can be a single record, a list of records, or a JSON string representing a list of records.
      :type query_data: dict
      :param get_all_item_data: Set this to True if anomaly detection should be performed on all the data related to the input ids.
      :type get_all_item_data: bool
      :param series_ids: List of series ids on which anomaly detection is to be performed.
      :type series_ids: List



   .. py:method:: is_anomaly(deployment_token, deployment_id, query_data = None)

      Returns a list of anomaly attributes based on login information for a specified account. Note that the inputs to this method, wherever applicable, should be the column names in the dataset mapped to the column mappings in our system (e.g. column 'account_name' mapped to mapping 'ACCOUNT_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: The input data for the prediction.
      :type query_data: dict



   .. py:method:: get_event_anomaly_score(deployment_token, deployment_id, query_data = None)

      Returns an anomaly score for an event.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: The input data for the prediction.
      :type query_data: dict



   .. py:method:: get_forecast(deployment_token, deployment_id, query_data, future_data = None, num_predictions = None, prediction_start = None, explain_predictions = False, explainer_type = None, get_item_data = False)

      Returns a list of forecasts for a given entity under the specified project deployment. Note that the inputs to the deployed model will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'holiday_yn' mapped to mapping 'FUTURE' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where 'Key' will be the column name (e.g. a column with name 'store_id' in your dataset) mapped to the column mapping ITEM_ID that uniquely identifies the entity against which forecasting is performed and 'Value' will be the unique value of the same entity.
      :type query_data: dict
      :param future_data: This will be a list of values known ahead of time that are relevant for forecasting (e.g. State Holidays, National Holidays, etc.). Each element is a dictionary, where the key and the value both will be of type 'str'. For example future data entered for a Store may be [{"Holiday":"No", "Promo":"Yes", "Date": "2015-07-31 00:00:00"}].
      :type future_data: list
      :param num_predictions: The number of timestamps to predict in the future.
      :type num_predictions: int
      :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for midnight of 2015-08-01).
      :type prediction_start: str
      :param explain_predictions: Will explain predictions for forecasting
      :type explain_predictions: bool
      :param explainer_type: Type of explainer to use for explanations
      :type explainer_type: str
      :param get_item_data: Will return the data corresponding to items in query
      :type get_item_data: bool
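
      A minimal usage sketch; the token, deployment ID, and the 'store_id' column are placeholders, and future_data follows the format described above:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # 'store_id' stands in for the column mapped to ITEM_ID; future_data
         # lists values known ahead of time for the forecast horizon.
         forecast = client.get_forecast(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             query_data={'store_id': 'S001'},
             future_data=[{'Holiday': 'No', 'Promo': 'Yes', 'Date': '2015-07-31 00:00:00'}],
             num_predictions=14,
             prediction_start='2015-08-01T00:00:00',
         )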



   .. py:method:: get_k_nearest(deployment_token, deployment_id, vector, k = None, distance = None, include_score = False, catalog_id = None)

      Returns the k nearest neighbors for the provided embedding vector.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param vector: Input vector to perform the k nearest neighbors with.
      :type vector: list
      :param k: Overrideable number of items to return.
      :type k: int
      :param distance: Specify the distance function to use. Options include "dot", "cosine", "euclidean", and "manhattan". Default = "dot"
      :type distance: str
      :param include_score: If True, will return the score alongside the resulting embedding value.
      :type include_score: bool
      :param catalog_id: An optional parameter honored only for embeddings that provide a catalog id
      :type catalog_id: str
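
      A minimal usage sketch; the token, deployment ID, and vector values are placeholders (the vector length must match the deployed embedding size):

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # Returns the 5 nearest neighbors by cosine distance, with scores.
         neighbors = client.get_k_nearest(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             vector=[0.12, -0.43, 0.88, 0.05],
             k=5,
             distance='cosine',
             include_score=True,
         )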



   .. py:method:: get_multiple_k_nearest(deployment_token, deployment_id, queries)

      Returns the k nearest neighbors for the queries provided.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param queries: List of mappings of format {"catalogId": "cat0", "vectors": [...], "k": 20, "distance": "euclidean"}. See `getKNearest` for additional information about the supported parameters.
      :type queries: list



   .. py:method:: get_labels(deployment_token, deployment_id, query_data, return_extracted_entities = False)

      Returns a list of scored labels for a document.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: Dictionary where key is "Content" and value is the text from which entities are to be extracted.
      :type query_data: dict
      :param return_extracted_entities: (Optional) If True, will return the extracted entities in simpler format
      :type return_extracted_entities: bool



   .. py:method:: get_entities_from_pdf(deployment_token, deployment_id, pdf = None, doc_id = None, return_extracted_features = False, verbose = False, save_extracted_features = None)

      Extracts text from the provided PDF and returns a list of recognized labels and their scores.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param pdf: (Optional) The PDF file to predict on. One of pdf or docId must be specified.
      :type pdf: io.TextIOBase
      :param doc_id: (Optional) The docId of an existing document to predict on. One of pdf or docId must be specified.
      :type doc_id: str
      :param return_extracted_features: (Optional) If True, will return all extracted features (e.g. all tokens in a page) from the PDF. Default is False.
      :type return_extracted_features: bool
      :param verbose: (Optional) If True, will return all the extracted tokens probabilities for all the trained labels. Default is False.
      :type verbose: bool
      :param save_extracted_features: (Optional) If True, will save extracted features (i.e. page tokens) so that they can be fetched using the prediction docId. Default is False.
      :type save_extracted_features: bool
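
      A minimal usage sketch; the token, deployment ID, and file name are placeholders, and the PDF is passed as an open file handle:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # Opens a local PDF and requests the extracted features alongside
         # the recognized labels and scores.
         with open('invoice.pdf', 'rb') as pdf_file:
             entities = client.get_entities_from_pdf(
                 deployment_token='your_deployment_token',
                 deployment_id='your_deployment_id',
                 pdf=pdf_file,
                 return_extracted_features=True,
             )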



   .. py:method:: get_recommendations(deployment_token, deployment_id, query_data, num_items = None, page = None, exclude_item_ids = None, score_field = None, scaling_factors = None, restrict_items = None, exclude_items = None, explore_fraction = None, diversity_attribute_name = None, diversity_max_results_per_value = None)

      Returns a list of recommendations for a given user under the specified project deployment. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'time' mapped to mapping 'TIMESTAMP' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where 'Key' will be the column name (e.g. a column with name 'user_name' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the user against which recommendations are made and 'Value' will be the unique value of that user. For example, if you have the column name 'user_name' mapped to the column mapping 'USER_ID', then the query must have the exact same column name (user_name) as key and the name of the user (John Doe) as value.
      :type query_data: dict
      :param num_items: The number of items to recommend on one page. By default, it is set to 50 items per page.
      :type num_items: int
      :param page: The page number to be displayed. For example, if num_items is set to 10 and there are 50 recommended items in total, an input value of 2 for 'page' will display the items ranked 11th through 20th.
      :type page: int
      :param score_field: The relative item scores are returned in a separate field named with the same name as the key (score_field) for this argument.
      :type score_field: str
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" in reference to which the model recommendations need to be biased; and the key, "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there's a type of item that might be less popular but you want to promote it or there's an item that always comes up and you want to demote it.
      :type scaling_factors: list
      :param restrict_items: It allows you to restrict the recommendations to certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", "value3", ...]}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1", "value3", ...]" to which to restrict the recommendations. Let's take an example where the input to restrict_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. This input will restrict the recommendations to SUVs and Sedans. This type of restriction is particularly useful if there's a list of items that you know is of use in some particular scenario and you want to restrict the recommendations only to that list.
      :type restrict_items: list
      :param exclude_items: It allows you to exclude certain items from the list of recommendations. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", ...]}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" to exclude from the recommendations. Let's take an example where the input to exclude_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. The resulting recommendation list will exclude all SUVs and Sedans. This is particularly useful if there's a list of items that you know is of no use in some particular scenario and you don't want to show those items present in that list.
      :type exclude_items: list
      :param explore_fraction: Explore fraction.
      :type explore_fraction: float
      :param diversity_attribute_name: Item attribute column name which is used to ensure diversity of prediction results.
      :type diversity_attribute_name: str
      :param diversity_max_results_per_value: Maximum number of results per value of diversity_attribute_name.
      :type diversity_max_results_per_value: int
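
      A minimal usage sketch; the token, deployment ID, and column names are placeholders, and the scaling_factors value mirrors the example described above:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # 'user_name' stands in for the column mapped to USER_ID; the scaling
         # factor boosts SUV and Sedan items by 1.4 before sorting.
         recommendations = client.get_recommendations(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             query_data={'user_name': 'John Doe'},
             num_items=10,
             page=1,
             scaling_factors=[{'column': 'VehicleType', 'values': ['SUV', 'Sedan'], 'factor': 1.4}],
         )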



   .. py:method:: get_personalized_ranking(deployment_token, deployment_id, query_data, preserve_ranks = None, preserve_unknown_items = False, scaling_factors = None)

      Returns a list of items with personalized promotions for a given user under the specified project deployment. Note that the inputs to this method, wherever applicable, should be the column names in the dataset mapped to the column mappings in our system (e.g. column 'item_code' mapped to mapping 'ITEM_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This should be a dictionary with two key-value pairs. The first pair represents a 'Key' where the column name (e.g. a column with name 'user_id' in the dataset) mapped to the column mapping USER_ID uniquely identifies the user against whom a prediction is made and a 'Value' which is the identifier value for that user. The second pair will have a 'Key' which will be the name of the column (e.g. movie_name) mapped to ITEM_ID (unique item identifier) and a 'Value' which will be a list of identifiers that uniquely identifies those items.
      :type query_data: dict
      :param preserve_ranks: List of dictionaries of format {"column": "col0", "values": ["value0", "value1"]}, where the ranks of items in query_data are preserved for all the items in "col0" with values "value0" and "value1". This option is useful when the desired items are being recommended in the desired order and the ranks for those items need to be kept unchanged during recommendation generation.
      :type preserve_ranks: list
      :param preserve_unknown_items: If true, any items that are unknown to the model will not be reranked, and the original position in the query will be preserved.
      :type preserve_unknown_items: bool
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" in reference to which the model recommendations need to be biased; and the key, "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there's a type of item that might be less popular but you want to promote it or there's an item that always comes up and you want to demote it.
      :type scaling_factors: list



   .. py:method:: get_ranked_items(deployment_token, deployment_id, query_data, preserve_ranks = None, preserve_unknown_items = False, score_field = None, scaling_factors = None, diversity_attribute_name = None, diversity_max_results_per_value = None)

      Returns a list of re-ranked items for a selected user when a list of items is required to be reranked according to the user's preferences. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'item_code' mapped to mapping 'ITEM_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary with two key-value pairs. The first pair represents a 'Key' where the column name (e.g. a column with name 'user_id' in your dataset) mapped to the column mapping USER_ID uniquely identifies the user against whom a prediction is made and a 'Value' which is the identifier value for that user. The second pair will have a 'Key' which will be the name of the column (e.g. movie_name) mapped to ITEM_ID (unique item identifier) and a 'Value' which will be a list of identifiers that uniquely identifies those items.
      :type query_data: dict
      :param preserve_ranks: List of dictionaries of format {"column": "col0", "values": ["value0", "value1"]}, where the ranks of items in query_data are preserved for all the items in "col0" with values "value0" and "value1". This option is useful when the desired items are being recommended in the desired order and the ranks for those items need to be kept unchanged during recommendation generation.
      :type preserve_ranks: list
      :param preserve_unknown_items: If true, any items that are unknown to the model will not be reranked, and the original position in the query will be preserved.
      :type preserve_unknown_items: bool
      :param score_field: The relative item scores are returned in a separate field named with the same name as the key (score_field) for this argument.
      :type score_field: str
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" in reference to which the model recommendations need to be biased; and the key, "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there is a type of item that might be less popular but you want to promote it or there is an item that always comes up and you want to demote it.
      :type scaling_factors: list
      :param diversity_attribute_name: Item attribute column name which is used to ensure diversity of prediction results.
      :type diversity_attribute_name: str
      :param diversity_max_results_per_value: Maximum number of results per value of diversity_attribute_name.
      :type diversity_max_results_per_value: int



   .. py:method:: get_related_items(deployment_token, deployment_id, query_data, num_items = None, page = None, scaling_factors = None, restrict_items = None, exclude_items = None)

      Returns a list of related items for a given item under the specified project deployment. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'item_code' mapped to mapping 'ITEM_ID' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: This will be a dictionary where the 'key' will be the column name (e.g. a column with name 'user_name' in your dataset) mapped to the column mapping USER_ID that uniquely identifies the user against which related items are determined and the 'value' will be the unique value of that user. For example, if you have the column name 'user_name' mapped to the column mapping 'USER_ID', then the query must have the exact same column name (user_name) as key and the name of the user (John Doe) as value.
      :type query_data: dict
      :param num_items: The number of items to recommend on one page. By default, it is set to 50 items per page.
      :type num_items: int
      :param page: The page number to be displayed. For example, if num_items is set to 10 and there are 50 recommended items in total, an input value of 2 for 'page' will display the items ranked 11th through 20th.
      :type page: int
      :param scaling_factors: It allows you to bias the model towards certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1"], "factor": 1.1}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" in reference to which the model recommendations need to be biased; and the key, "factor" takes the factor by which the item scores are adjusted. Let's take an example where the input to scaling_factors is [{"column": "VehicleType", "values": ["SUV", "Sedan"], "factor": 1.4}]. After we apply the model to get item probabilities, for every SUV and Sedan in the list, we will multiply the respective probability by 1.4 before sorting. This is particularly useful if there's a type of item that might be less popular but you want to promote it or there's an item that always comes up and you want to demote it.
      :type scaling_factors: list
      :param restrict_items: It allows you to restrict the recommendations to certain items. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", "value3", ...]}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1", "value3", ...]" to which to restrict the recommendations. Let's take an example where the input to restrict_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. This input will restrict the recommendations to SUVs and Sedans. This type of restriction is particularly useful if there's a list of items that you know is of use in some particular scenario and you want to restrict the recommendations only to that list.
      :type restrict_items: list
      :param exclude_items: It allows you to exclude certain items from the list of recommendations. The input to this argument is a list of dictionaries where the format of each dictionary is as follows: {"column": "col0", "values": ["value0", "value1", ...]}. The key, "column" takes the name of the column, "col0"; the key, "values" takes the list of items, "["value0", "value1"]" to exclude from the recommendations. Let's take an example where the input to exclude_items is [{"column": "VehicleType", "values": ["SUV", "Sedan"]}]. The resulting recommendation list will exclude all SUVs and Sedans. This is particularly useful if there's a list of items that you know is of no use in some particular scenario and you don't want to show those items present in that list.
      :type exclude_items: list



   .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None)

      Return a chat response which continues the conversation based on the input messages and search results.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param messages: A list of chronologically ordered messages, starting with a user message and alternating sources. A message is a dict with attributes: is_user (bool): whether the message is from the user; text (str): the message's text.
      :type messages: list
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
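
      A minimal usage sketch; the token, deployment ID, and message text are placeholders, and each message follows the is_user/text format described above:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # A single-turn conversation: one user message, default temperature.
         response = client.get_chat_response(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             messages=[{'is_user': True, 'text': 'What is the refund policy?'}],
             num_completion_tokens=512,
             temperature=0.0,
         )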



   .. py:method:: get_chat_response_with_binary_data(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, attachments = None)

      Return a chat response which continues the conversation based on the input messages and search results.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param messages: A list of chronologically ordered messages, starting with a user message and alternating sources. A message is a dict with attributes: is_user (bool): whether the message is from the user; text (str): the message's text.
      :type messages: list
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param attachments: A dictionary of binary data to use to answer the queries.
      :type attachments: dict



   .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)

      Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).

      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param message: A message from the user
      :type message: str
      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation id.
      :type external_session_id: str
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param doc_infos: An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from the docstore.
      :type doc_infos: list



   .. py:method:: get_conversation_response_with_binary_data(deployment_id, deployment_token, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, attachments = None)

      Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).

      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param message: A message from the user
      :type message: str
      :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
      :type deployment_conversation_id: str
      :param external_session_id: The user supplied unique identifier of a deployment conversation to continue. If specified, we will use this instead of an internal deployment conversation id.
      :type external_session_id: str
      :param llm_name: Name of the specific LLM backend to use to power the chat experience
      :type llm_name: str
      :param num_completion_tokens: Default for maximum number of tokens for chat answers
      :type num_completion_tokens: int
      :param system_message: The generative LLM system message
      :type system_message: str
      :param temperature: The generative LLM temperature
      :type temperature: float
      :param filter_key_values: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filter_key_values: dict
      :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
      :type search_score_cutoff: float
      :param chat_config: A dictionary specifying the query chat config override.
      :type chat_config: dict
      :param attachments: A dictionary of binary data to use to answer the queries.
      :type attachments: dict



   .. py:method:: get_search_results(deployment_token, deployment_id, query_data, num = 15)

      Return the most relevant search results to the search query from the uploaded documents.

      :param deployment_token: A token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be securely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where the key is "Content" and the value is the text from which entities are to be extracted.
      :type query_data: dict
      :param num: Number of search results to return.
      :type num: int
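
      A minimal usage sketch; the token, deployment ID, and query text are placeholders, with query_data using the "Content" key described above:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         # Returns the 5 most relevant search results for the query text.
         results = client.get_search_results(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             query_data={'Content': 'warranty terms for model X'},
             num=5,
         )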



   .. py:method:: get_sentiment(deployment_token, deployment_id, document)

      Predicts sentiment on a document

      :param deployment_token: A token used to authenticate access to deployments created in this project. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for a deployment created under this project.
      :type deployment_id: str
      :param document: The document to be analyzed for sentiment.
      :type document: str



   .. py:method:: get_entailment(deployment_token, deployment_id, document)

      Predicts the classification of the document

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param document: The document to be classified.
      :type document: str



   .. py:method:: get_classification(deployment_token, deployment_id, document)

      Predicts the classification of the document

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param document: The document to be classified.
      :type document: str



   .. py:method:: get_summary(deployment_token, deployment_id, query_data)

      Returns a JSON of the predicted summary for the given document. Note that the inputs to this method, wherever applicable, will be the column names in your dataset mapped to the column mappings in our system (e.g. column 'text' mapped to mapping 'DOCUMENT' in our system).

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: Raw data dictionary containing the required document data - must have a key 'document' corresponding to a DOCUMENT type text as value.
      :type query_data: dict



   .. py:method:: predict_language(deployment_token, deployment_id, query_data)

      Predicts the language of the text

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments within this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: The input string to detect.
      :type query_data: str



   .. py:method:: get_assignments(deployment_token, deployment_id, query_data, forced_assignments = None, solve_time_limit_seconds = None, include_all_assignments = False)

      Get all positive assignments that match a query.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: Specifies the set of assignments being requested. The value for the key can be: (1) a simple scalar value, which is matched exactly; (2) a list of values, which matches any element in the list; (3) a dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range.
      :type query_data: dict
      :param forced_assignments: Set of assignments to force and resolve before returning query results.
      :type forced_assignments: dict
      :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
      :type solve_time_limit_seconds: float
      :param include_all_assignments: If True, will return all assignments, including assignments with value 0. Default is False.
      :type include_all_assignments: bool
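
      A minimal usage sketch; the token, deployment ID, and the 'driver_id'/'shift_start' keys are hypothetical column names used only to illustrate the scalar, list, and range forms of query_data described above:

      .. code-block:: python

         from abacusai import PredictionClient

         client = PredictionClient()

         assignments = client.get_assignments(
             deployment_token='your_deployment_token',
             deployment_id='your_deployment_id',
             query_data={
                 'driver_id': ['D1', 'D2'],                       # matches any element in the list
                 'shift_start': {'lower_in': 8, 'upper_ex': 12},  # inclusive/exclusive range
             },
             solve_time_limit_seconds=30.0,
         )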



   .. py:method:: get_alternative_assignments(deployment_token, deployment_id, query_data, add_constraints = None, solve_time_limit_seconds = None, best_alternate_only = False)

      Get alternative positive assignments for given query. Optimal assignments are ignored and the alternative assignments are returned instead.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: Specifies the set of assignments being requested. The value for the key can be: (1) a simple scalar value, which is matched exactly; (2) a list of values, which matches any element in the list; (3) a dictionary with keys lower_in/lower_ex and upper_in/upper_ex, which matches values in an inclusive/exclusive range.
      :type query_data: dict
      :param add_constraints: List of constraint dicts to apply to the query. Each constraint dict should have the following keys: (1) query (dict): specifies the set of assignment variables involved in the constraint; the format is the same as query_data. (2) operator (str): constraint operator '=', '<=' or '>='. (3) constant (int): constraint RHS constant value. (4) coefficient_column (str): column in the Assignment feature group to be used as the coefficient for the assignment variables; optional, defaults to 1.
      :type add_constraints: list
      :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
      :type solve_time_limit_seconds: float
      :param best_alternate_only: When True, only the best alternate will be returned; when False, multiple alternates are returned.
      :type best_alternate_only: bool



   .. py:method:: get_assignments_online_with_new_serialized_inputs(deployment_token, deployment_id, query_data = None, solve_time_limit_seconds = None)

      Get assignments for given query, with new inputs

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary with the assignment, constraint and constraint_equations_df entries.
      :type query_data: dict
      :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
      :type solve_time_limit_seconds: float



   .. py:method:: check_constraints(deployment_token, deployment_id, query_data)

      Check for any constraints violated by the overrides.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param query_data: Assignment overrides to the solution.
      :type query_data: dict



   .. py:method:: predict_with_binary_data(deployment_token, deployment_id, blob)

      Make predictions for a given blob of binary data, e.g. an image or audio file.

      :param deployment_token: A token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param blob: The multipart/form-data of the data.
      :type blob: io.TextIOBase



   .. py:method:: describe_image(deployment_token, deployment_id, image, categories, top_n = None)

      Describe the similarity between an image and a list of categories.

      :param deployment_token: Authentication token to access created deployments. This token is only authorized to predict on deployments in the current project, and can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: Unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param image: Image to describe.
      :type image: io.TextIOBase
      :param categories: List of candidate categories to compare with the image.
      :type categories: list
      :param top_n: Return the N most similar categories.
      :type top_n: int



   .. py:method:: get_text_from_document(deployment_token, deployment_id, document = None, adjust_doc_orientation = False, save_predicted_pdf = False, save_extracted_features = False)

      Generate text from a document

      :param deployment_token: Authentication token to access created deployments. This token is only authorized to predict on deployments in the current project, and can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: Unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param document: Input document which can be an image, pdf, or word document (Some formats might not be supported yet)
      :type document: io.TextIOBase
      :param adjust_doc_orientation: (Optional) whether to detect the document page orientation and rotate it if needed.
      :type adjust_doc_orientation: bool
      :param save_predicted_pdf: (Optional) If True, will save the predicted pdf bytes so that they can be fetched using the prediction docId. Default is False.
      :type save_predicted_pdf: bool
      :param save_extracted_features: (Optional) If True, will save extracted features (i.e. page tokens) so that they can be fetched using the prediction docId. Default is False.
      :type save_extracted_features: bool
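
      An illustrative sketch of extracting text from a local file; the file path and identifiers are placeholders and an authenticated client is assumed:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         with open('scanned_invoice.pdf', 'rb') as document:  # hypothetical file
             result = client.get_text_from_document(
                 deployment_token='DEPLOYMENT_TOKEN',
                 deployment_id='DEPLOYMENT_ID',
                 document=document,
                 adjust_doc_orientation=True,   # correct rotated pages before extraction
                 save_predicted_pdf=True,       # keep the predicted PDF for later retrieval
             )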



   .. py:method:: transcribe_audio(deployment_token, deployment_id, audio)

      Transcribe the audio

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to make predictions on deployments in this project, so it can be safely embedded in an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier of a deployment created under the project.
      :type deployment_id: str
      :param audio: The audio to transcribe.
      :type audio: io.TextIOBase



   .. py:method:: classify_image(deployment_token, deployment_id, image = None, doc_id = None)

      Classify an image.

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier to a deployment created under the project.
      :type deployment_id: str
      :param image: The binary data of the image to classify. One of image or doc_id must be specified.
      :type image: io.TextIOBase
      :param doc_id: The document ID of the image. One of image or doc_id must be specified.
      :type doc_id: str
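
      A sketch of the two input options; the file path and identifiers are placeholders, and an authenticated client is assumed:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         # Option 1: pass the raw image bytes.
         with open('photo.jpg', 'rb') as image:  # hypothetical file
             prediction = client.classify_image(
                 deployment_token='DEPLOYMENT_TOKEN',
                 deployment_id='DEPLOYMENT_ID',
                 image=image,
             )

         # Option 2: reference a previously uploaded document by its ID.
         prediction = client.classify_image(
             deployment_token='DEPLOYMENT_TOKEN',
             deployment_id='DEPLOYMENT_ID',
             doc_id='DOC_ID',
         )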



   .. py:method:: classify_pdf(deployment_token, deployment_id, pdf = None)

      Returns a classification prediction from a PDF

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier for a deployment created under the project.
      :type deployment_id: str
      :param pdf: (Optional) The pdf to predict on. One of pdf or docId must be specified.
      :type pdf: io.TextIOBase



   .. py:method:: get_cluster(deployment_token, deployment_id, query_data)

      Predicts the cluster for given data.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param query_data: A dictionary where each 'key' represents a column name and its corresponding 'value' represents the value of that column. For Timeseries Clustering, the 'key' should be ITEM_ID, and its value should represent a unique item ID that needs clustering.
      :type query_data: dict



   .. py:method:: get_objects_from_image(deployment_token, deployment_id, image)

      Detect objects in an image.

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier to a deployment created under the project.
      :type deployment_id: str
      :param image: The binary data of the image to detect objects from.
      :type image: io.TextIOBase



   .. py:method:: score_image(deployment_token, deployment_id, image)

      Score an image.

      :param deployment_token: A deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier to a deployment created under the project.
      :type deployment_id: str
      :param image: The binary data of the image to score.
      :type image: io.TextIOBase



   .. py:method:: transfer_style(deployment_token, deployment_id, source_image, style_image)

      Change the source image to adopt the visual style from the style image.

      :param deployment_token: A token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param source_image: The source image to which the style will be applied.
      :type source_image: io.TextIOBase
      :param style_image: The image that has the style as a reference.
      :type style_image: io.TextIOBase



   .. py:method:: generate_image(deployment_token, deployment_id, query_data)

      Generate an image from text prompt.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
      :type deployment_token: str
      :param deployment_id: A unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param query_data: Specifies the text prompt. For example, {'prompt': 'a cat'}
      :type query_data: dict
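
      For illustration, assuming an authenticated client and placeholder identifiers:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         image = client.generate_image(
             deployment_token='DEPLOYMENT_TOKEN',
             deployment_id='DEPLOYMENT_ID',
             query_data={'prompt': 'a watercolor painting of a lighthouse at dusk'},
         )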



   .. py:method:: execute_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None)

      Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
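
      A sketch of invoking an agent with keyword arguments; the agent parameter names below are hypothetical and an authenticated client is assumed:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         # Keys of keyword_arguments must match the parameter names of the agent's
         # execute function (names below are hypothetical).
         response = client.execute_agent(
             deployment_token='DEPLOYMENT_TOKEN',
             deployment_id='DEPLOYMENT_ID',
             keyword_arguments={
                 'ticket_text': 'My order arrived damaged.',
                 'language': 'en',
             },
         )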



   .. py:method:: get_matrix_agent_schema(deployment_token, deployment_id, query, doc_infos = None, deployment_conversation_id = None, external_session_id = None)

      Gets the matrix agent schema for a deployed AI agent, initialized from the given user query.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param query: User input query to initialize the matrix computation.
      :type query: str
      :param doc_infos: An optional list of documents used for constructing the matrix. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
      :type doc_infos: list
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str
      :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
      :type external_session_id: str



   .. py:method:: execute_conversation_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, regenerate = False, doc_infos = None, agent_workflow_node_id = None)

      Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str
      :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
      :type external_session_id: str
      :param regenerate: If True, will regenerate the response from the last query.
      :type regenerate: bool
      :param doc_infos: An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
      :type doc_infos: list
      :param agent_workflow_node_id: An optional agent workflow node id to trigger agent execution from an intermediate node.
      :type agent_workflow_node_id: str



   .. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None, filter_clause = None, crowding_limits = None, include_text_search = False)

      Lookup document retrievers and return the matching documents from the document retriever deployed with given query.

      Original documents are split into chunks and stored in the document retriever. This lookup function returns the relevant chunks
      from the document retriever. Where the provided settings permit, the returned chunks may be expanded to include more words from
      the original documents and merged if they overlap. The returned chunks are sorted by relevance.


      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments within this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param data: The query to search for.
      :type data: str
      :param filters: A dictionary mapping column names to a list of values to restrict the retrieved search results.
      :type filters: dict
      :param num: If provided, will limit the number of results to the value specified.
      :type num: int
      :param result_columns: If provided, will limit the column properties present in each result to those specified in this list.
      :type result_columns: list
      :param max_words: If provided, will limit the total number of words in the results to the value specified.
      :type max_words: int
      :param num_retrieval_margin_words: If provided, will add this number of words from left and right of the returned chunks.
      :type num_retrieval_margin_words: int
      :param max_words_per_chunk: If provided, will limit the number of words in each chunk to the value specified. If the value provided is smaller than the actual size of a chunk on disk (determined during document retriever creation), the actual chunk size will be used, i.e. chunks looked up from document retrievers will not be split into smaller chunks during lookup due to this setting.
      :type max_words_per_chunk: int
      :param score_multiplier_column: If provided, will use the values in this column to modify the relevance score of the returned chunks. Values in this column must be numeric.
      :type score_multiplier_column: str
      :param min_score: If provided, will filter out the results with score less than the value specified.
      :type min_score: float
      :param required_phrases: If provided, each result will contain at least one of the phrases in the given list. The matching is whitespace and case insensitive.
      :type required_phrases: list
      :param filter_clause: If provided, filter the results of the query using this sql where clause.
      :type filter_clause: str
      :param crowding_limits: A dictionary mapping metadata columns to the maximum number of results per unique value of the column. This is used to ensure diversity of metadata attribute values in the results. If a particular attribute value has already reached its maximum count, further results with that same attribute value will be excluded from the final result set. An entry in the map can also be a map specifying the limit per attribute value rather than a single limit for all values. This allows a per value limit for attributes. If an attribute value is not present in the map its limit defaults to zero.
      :type crowding_limits: dict
      :param include_text_search: If true, combine the ranking of results from a BM25 text search over the documents with the vector search using reciprocal rank fusion. It leverages both lexical and semantic matching for better overall results. It's particularly valuable in professional, technical, or specialized fields where both precision in terminology and understanding of context are important.
      :type include_text_search: bool

      :returns: The relevant documentation results found from the document retriever.
      :rtype: list[DocumentRetrieverLookupResult]
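
      A minimal retrieval sketch; the metadata column, limits, and identifiers below are hypothetical and an authenticated client is assumed:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         results = client.lookup_matches(
             deployment_token='DEPLOYMENT_TOKEN',
             deployment_id='DEPLOYMENT_ID',
             data='How do I rotate my API keys?',
             filters={'source': ['handbook', 'faq']},  # hypothetical metadata column
             num=5,
             max_words=1500,
             crowding_limits={'source': 2},            # at most 2 chunks per 'source' value
             include_text_search=True,                 # combine BM25 with vector search
         )
         for match in results:
             print(match.to_dict())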



   .. py:method:: get_completion(deployment_token, deployment_id, prompt)

      Returns the finetuned LLM generated completion of the prompt.

      :param deployment_token: The deployment token to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: The unique identifier to a deployment created under the project.
      :type deployment_id: str
      :param prompt: The prompt given to the finetuned LLM to generate the completion.
      :type prompt: str



   .. py:method:: execute_agent_with_binary_data(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, blobs = None)

      Executes a deployed AI agent function with binary data as inputs.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str
      :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
      :type external_session_id: str
      :param blobs: A dictionary of binary data to use as inputs to the agent execute function.
      :type blobs: dict

      :returns: The result of the agent execution
      :rtype: AgentDataExecutionResult
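
      A sketch of passing binary inputs to an agent; the file path and blob key are hypothetical and an authenticated client is assumed:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials

         with open('contract.pdf', 'rb') as f:  # hypothetical input file
             result = client.execute_agent_with_binary_data(
                 deployment_token='DEPLOYMENT_TOKEN',
                 deployment_id='DEPLOYMENT_ID',
                 keyword_arguments={'task': 'Summarize the attached document.'},
                 blobs={'document': f.read()},  # key 'document' is hypothetical
             )
         print(result.to_dict())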



   .. py:method:: start_autonomous_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, save_conversations = True)

      Starts a deployed Autonomous agent associated with the given deployment_conversation_id using the arguments and keyword arguments as inputs for execute function of trigger node.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param arguments: Positional arguments to the agent execute function.
      :type arguments: list
      :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
      :type keyword_arguments: dict
      :param save_conversations: If true then a new conversation will be created for every run of the workflow associated with the agent.
      :type save_conversations: bool



   .. py:method:: pause_autonomous_agent(deployment_token, deployment_id, deployment_conversation_id)

      Pauses a deployed Autonomous agent associated with the given deployment_conversation_id.

      :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, making it safe to embed this model in an application or website.
      :type deployment_token: str
      :param deployment_id: A unique string identifier for the deployment created under the project.
      :type deployment_id: str
      :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
      :type deployment_conversation_id: str



.. py:class:: PredictionDataset(client, datasetId=None, datasetType=None, datasetVersion=None, default=None, required=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Batch Input Datasets

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param datasetId: The unique identifier of the dataset
   :type datasetId: str
   :param datasetType: The type of the dataset
   :type datasetType: str
   :param datasetVersion: The unique identifier of the dataset version used for predictions
   :type datasetVersion: str
   :param default: If true, this dataset is the default dataset in the model
   :type default: bool
   :param required: If true, this dataset is required for the batch prediction
   :type required: bool


   .. py:attribute:: dataset_id
      :value: None



   .. py:attribute:: dataset_type
      :value: None



   .. py:attribute:: dataset_version
      :value: None



   .. py:attribute:: default
      :value: None



   .. py:attribute:: required
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PredictionFeatureGroup(client, featureGroupId=None, featureGroupVersion=None, datasetType=None, default=None, required=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Batch Input Feature Group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: The unique identifier of the feature group
   :type featureGroupId: str
   :param featureGroupVersion: The unique identifier of the feature group version used for predictions
   :type featureGroupVersion: str
   :param datasetType: The type of the dataset
   :type datasetType: str
   :param default: If true, this feature group is the default feature group in the model
   :type default: bool
   :param required: If true, this feature group is required for the batch prediction
   :type required: bool


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: dataset_type
      :value: None



   .. py:attribute:: default
      :value: None



   .. py:attribute:: required
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PredictionInput(client, featureGroupDatasetIds=None, datasetIdRemap=None, featureGroups={}, datasets={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Batch inputs

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupDatasetIds: The list of dataset IDs to use as input
   :type featureGroupDatasetIds: list
   :param datasetIdRemap: Replacement datasets to swap as prediction input
   :type datasetIdRemap: dict
   :param featureGroups: List of prediction feature groups
   :type featureGroups: PredictionFeatureGroup
   :param datasets: List of prediction datasets
   :type datasets: PredictionDataset


   .. py:attribute:: feature_group_dataset_ids
      :value: None



   .. py:attribute:: dataset_id_remap
      :value: None



   .. py:attribute:: feature_groups


   .. py:attribute:: datasets


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PredictionLogRecord(client, requestId=None, query=None, queryTimeMs=None, timestampMs=None, response=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Record for a prediction request log.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param requestId: The unique identifier of the prediction request.
   :type requestId: str
   :param query: The query used to make the prediction.
   :type query: dict
   :param queryTimeMs: The time taken to make the prediction.
   :type queryTimeMs: int
   :param timestampMs: The timestamp of the prediction request.
   :type timestampMs: str
   :param response: The prediction response.
   :type response: dict


   .. py:attribute:: request_id
      :value: None



   .. py:attribute:: query
      :value: None



   .. py:attribute:: query_time_ms
      :value: None



   .. py:attribute:: timestamp_ms
      :value: None



   .. py:attribute:: response
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PredictionOperator(client, name=None, predictionOperatorId=None, createdAt=None, updatedAt=None, projectId=None, predictFunctionName=None, sourceCode=None, initializeFunctionName=None, notebookId=None, memory=None, useGpu=None, featureGroupIds=None, featureGroupTableNames=None, codeSource={}, refreshSchedules={}, latestPredictionOperatorVersion={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A prediction operator.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name for the prediction operator.
   :type name: str
   :param predictionOperatorId: The unique identifier of the prediction operator.
   :type predictionOperatorId: str
   :param createdAt: Date and time at which the prediction operator was created.
   :type createdAt: str
   :param updatedAt: Date and time at which the prediction operator was updated.
   :type updatedAt: str
   :param projectId: The project this prediction operator belongs to.
   :type projectId: str
   :param predictFunctionName: Name of the function found in the source code that will be executed to run predictions.
   :type predictFunctionName: str
   :param sourceCode: Python code used to make the prediction operator.
   :type sourceCode: str
   :param initializeFunctionName: Name of the optional initialize function found in the source code. This function will generate anything used by predictions, based on input feature groups.
   :type initializeFunctionName: str
   :param notebookId: The unique string identifier of the notebook used to create or edit the prediction operator.
   :type notebookId: str
   :param memory: Memory in GB specified for the prediction operator.
   :type memory: int
   :param useGpu: Whether this prediction operator is using gpu.
   :type useGpu: bool
   :param featureGroupIds: A list of Feature Group IDs used for initializing.
   :type featureGroupIds: list
   :param featureGroupTableNames: A list of Feature Group table names used for initializing.
   :type featureGroupTableNames: list
   :param codeSource: If a python model, information on the source code.
   :type codeSource: CodeSource
   :param latestPredictionOperatorVersion: The unique string identifier of the latest version.
   :type latestPredictionOperatorVersion: PredictionOperatorVersion
   :param refreshSchedules: List of refresh schedules that indicate when the next prediction operator version will be processed
   :type refreshSchedules: RefreshSchedule


   .. py:attribute:: name
      :value: None



   .. py:attribute:: prediction_operator_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: predict_function_name
      :value: None



   .. py:attribute:: source_code
      :value: None



   .. py:attribute:: initialize_function_name
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: use_gpu
      :value: None



   .. py:attribute:: feature_group_ids
      :value: None



   .. py:attribute:: feature_group_table_names
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: refresh_schedules


   .. py:attribute:: latest_prediction_operator_version


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: PredictionOperator



   .. py:method:: describe()

      Describe an existing prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str

      :returns: The requested prediction operator object.
      :rtype: PredictionOperator



   .. py:method:: update(name = None, feature_group_ids = None, source_code = None, initialize_function_name = None, predict_function_name = None, cpu_size = None, memory = None, package_requirements = None, use_gpu = None)

      Update an existing prediction operator. This does not create a new version.

      :param name: Name of the prediction operator.
      :type name: str
      :param feature_group_ids: List of feature groups that are supplied to the initialize function as parameters. Each of the parameters are materialized Dataframes. The order should match the initialize function's parameters.
      :type feature_group_ids: List
      :param source_code: Contents of a valid Python source code file. The source code should contain the function `predictFunctionName`, and the function `initializeFunctionName` if defined.
      :type source_code: str
      :param initialize_function_name: Name of the optional initialize function found in the source code. This function will generate anything used by predictions, based on input feature groups.
      :type initialize_function_name: str
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions.
      :type predict_function_name: str
      :param cpu_size: Size of the CPU for the prediction operator.
      :type cpu_size: str
      :param memory: Memory (in GB) for the prediction operator.
      :type memory: int
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: list
      :param use_gpu: Whether this prediction operator needs gpu.
      :type use_gpu: bool

      :returns: The updated prediction operator object.
      :rtype: PredictionOperator
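
      A minimal update sketch with hypothetical identifiers and source code; it assumes the operator was retrieved through the client's describe call:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials
         operator = client.describe_prediction_operator('PREDICTION_OPERATOR_ID')  # hypothetical ID

         # Hypothetical source: an initialize function that builds lookup state from a
         # materialized feature group, and a predict function that uses that state.
         source_code = '''
         def init(items_df):
             return {'items': items_df.set_index('item_id')}

         def predict(state, query):
             return state['items'].loc[query['item_id']].to_dict()
         '''

         operator = operator.update(
             source_code=source_code,
             initialize_function_name='init',
             predict_function_name='predict',
             feature_group_ids=['FEATURE_GROUP_ID'],
             memory=16,
         )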



   .. py:method:: delete()

      Delete an existing prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str



   .. py:method:: deploy(auto_deploy = True)

      Deploy the prediction operator.

      :param auto_deploy: Flag to enable the automatic deployment when a new prediction operator version is created.
      :type auto_deploy: bool

      :returns: The created deployment object.
      :rtype: Deployment



   .. py:method:: create_version()

      Create a new version of the prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str

      :returns: The created prediction operator version object.
      :rtype: PredictionOperatorVersion



   .. py:method:: list_versions()

      List all the prediction operator versions for a prediction operator.

      :param prediction_operator_id: The unique ID of the prediction operator.
      :type prediction_operator_id: str

      :returns: A list of prediction operator version objects.
      :rtype: list[PredictionOperatorVersion]



.. py:class:: PredictionOperatorVersion(client, predictionOperatorId=None, predictionOperatorVersion=None, createdAt=None, updatedAt=None, sourceCode=None, memory=None, useGpu=None, featureGroupIds=None, featureGroupVersions=None, status=None, error=None, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A prediction operator version.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param predictionOperatorId: The unique identifier of the prediction operator.
   :type predictionOperatorId: str
   :param predictionOperatorVersion: The unique identifier of the prediction operator version.
   :type predictionOperatorVersion: str
   :param createdAt: Date and time at which the prediction operator was created.
   :type createdAt: str
   :param updatedAt: Date and time at which the prediction operator was updated.
   :type updatedAt: str
   :param sourceCode: Python code used to make the prediction operator.
   :type sourceCode: str
   :param memory: Memory in GB specified for the prediction operator version.
   :type memory: int
   :param useGpu: Whether this prediction operator version is using gpu.
   :type useGpu: bool
   :param featureGroupIds: A list of Feature Group IDs used for initializing.
   :type featureGroupIds: list
   :param featureGroupVersions: A list of Feature Group version IDs used for initializing.
   :type featureGroupVersions: list
   :param status: The current status of the prediction operator version.
   :type status: str
   :param error: The error message if the status failed.
   :type error: str
   :param codeSource: If a python model, information on the source code.
   :type codeSource: CodeSource


   .. py:attribute:: prediction_operator_id
      :value: None



   .. py:attribute:: prediction_operator_version
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: updated_at
      :value: None



   .. py:attribute:: source_code
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: use_gpu
      :value: None



   .. py:attribute:: feature_group_ids
      :value: None



   .. py:attribute:: feature_group_versions
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: delete()

      Delete a prediction operator version.

      :param prediction_operator_version: The unique ID of the prediction operator version.
      :type prediction_operator_version: str



.. py:class:: ProblemType(client, problemType=None, requiredFeatureGroupType=None, optionalFeatureGroupTypes=None, useCasesSupportCustomAlgorithm=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Description of a problem type which is the common underlying problem for different use cases.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param problemType: Name of the problem type
   :type problemType: str
   :param requiredFeatureGroupType: The required feature group types to train for this problem type
   :type requiredFeatureGroupType: str
   :param optionalFeatureGroupTypes: The optional feature group types that can be used to train for this problem type
   :type optionalFeatureGroupTypes: list[str]
   :param useCasesSupportCustomAlgorithm: A list of use cases that support custom algorithms
   :type useCasesSupportCustomAlgorithm: list


   .. py:attribute:: problem_type
      :value: None



   .. py:attribute:: required_feature_group_type
      :value: None



   .. py:attribute:: optional_feature_group_types
      :value: None



   .. py:attribute:: use_cases_support_custom_algorithm
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Project(client, projectId=None, name=None, useCase=None, problemType=None, createdAt=None, tags=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A project is a container which holds datasets, models and deployments

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param projectId: The ID of the project.
   :type projectId: str
   :param name: The name of the project.
   :type name: str
   :param useCase: The use case associated with the project.
   :type useCase: str
   :param problemType: The problem type associated with the project.
   :type problemType: str
   :param createdAt: The date and time when the project was created.
   :type createdAt: str
   :param tags: List of tags associated with the project.
   :type tags: list[str]


   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: use_case
      :value: None



   .. py:attribute:: problem_type
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: tags
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Project



   .. py:method:: describe()

      Returns a description of a project.

      :param project_id: A unique string identifier for the project.
      :type project_id: str

      :returns: The description of the project.
      :rtype: Project



   .. py:method:: rename(name)

      This method renames a project after it is created.

      :param name: The new name for the project.
      :type name: str



   .. py:method:: delete(force_delete = False)

      Delete a specified project from your organization.

      This method deletes the project, its associated trained models, and deployments. The datasets attached to the specified project remain available for use with other projects in the organization.

      This method will not delete a project that contains active deployments. Ensure that all active deployments are stopped before using the delete option.

      Note: Deleted projects, models, and deployments cannot be recovered.


      :param force_delete: If True, the project will be deleted even if it has active deployments.
      :type force_delete: bool



   .. py:method:: add_tags(tags)

      This method adds a tag to a project.

      :param tags: The tags to add to the project.
      :type tags: list



   .. py:method:: remove_tags(tags)

      This method removes a tag from a project.

      :param tags: The tags to remove from the project.
      :type tags: list



   .. py:method:: set_feature_mapping(feature_group_id, feature_name, feature_mapping = None, nested_column_name = None)

      Set a column's feature mapping. If the column mapping is single-use and already set in another column in this feature group, this call will first remove the other column's mapping and move it to this column.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str
      :param feature_name: The name of the feature.
      :type feature_name: str
      :param feature_mapping: The mapping of the feature in the feature group.
      :type feature_mapping: str
      :param nested_column_name: The name of the nested column if the input feature is part of a nested feature group for the given feature_group_id.
      :type nested_column_name: str

      :returns: A list of objects that describes the resulting feature group's schema after the feature's featureMapping is set.
      :rtype: list[Feature]
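
      A sketch of mapping a hypothetical target column; the project retrieval, column name, and mapping value are placeholders:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials
         project = client.describe_project('PROJECT_ID')  # hypothetical project ID

         # Mark the 'churned' column as the prediction target (column name and
         # feature mapping value are hypothetical).
         features = project.set_feature_mapping(
             feature_group_id='FEATURE_GROUP_ID',
             feature_name='churned',
             feature_mapping='TARGET',
         )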



   .. py:method:: validate(feature_group_ids = None)

      Validates that the specified project has all required feature group types for its use case and that all required feature columns are set.

      :param feature_group_ids: The list of feature group IDs to validate.
      :type feature_group_ids: List

      :returns: The project validation. If the specified project is missing required columns or feature groups, the response includes an array of objects for each missing required feature group and the missing required features in each feature group.
      :rtype: ProjectValidation



   .. py:method:: infer_feature_mappings(feature_group_id)

      Infer the feature mappings for the feature group in the project based on the problem type.

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str

      :returns: A dict that contains the inferred feature mappings.
      :rtype: InferredFeatureMappings



   .. py:method:: describe_feature_group(feature_group_id)

      Describe a feature group associated with a project

      :param feature_group_id: The unique ID associated with the feature group.
      :type feature_group_id: str

      :returns: The project feature group object.
      :rtype: ProjectFeatureGroup



   .. py:method:: list_feature_groups(filter_feature_group_use = None, limit = 100, start_after_id = None)

      List all the feature groups associated with a project

      :param filter_feature_group_use: The feature group use filter, when given as an argument only allows feature groups present in this project to be returned if they are of the given use. Possible values are: 'USER_CREATED', 'BATCH_PREDICTION_OUTPUT'.
      :type filter_feature_group_use: str
      :param limit: The maximum number of feature groups to be retrieved.
      :type limit: int
      :param start_after_id: An offset parameter to exclude all feature groups up to a specified ID.
      :type start_after_id: str

      :returns: All the Feature Groups in a project.
      :rtype: list[ProjectFeatureGroup]



   .. py:method:: list_feature_group_templates(limit = 100, start_after_id = None, should_include_all_system_templates = False)

      List feature group templates for feature groups associated with the project.

      :param limit: Maximum number of templates to be retrieved.
      :type limit: int
      :param start_after_id: Offset parameter to exclude all templates till the specified feature group template ID.
      :type start_after_id: str
      :param should_include_all_system_templates: If True, will include built-in templates.
      :type should_include_all_system_templates: bool

      :returns: All the feature groups in the organization, optionally limited by the feature group that created the template(s).
      :rtype: list[FeatureGroupTemplate]



   .. py:method:: get_training_config_options(feature_group_ids = None, for_retrain = False, current_training_config = None)

      Retrieves the full initial description of the model training configuration options available for the specified project. The configuration options available are determined by the use case associated with the specified project. Refer to the [Use Case Documentation]({USE_CASES_URL}) for more information on use cases and use case-specific configuration options.

      :param feature_group_ids: The feature group IDs to be used for training.
      :type feature_group_ids: List
      :param for_retrain: Whether the training config options are used for retraining.
      :type for_retrain: bool
      :param current_training_config: The current state of the training config, with some options set, which shall be used to get new options after refresh. This is `None` by default initially.
      :type current_training_config: TrainingConfig

      :returns: An array of options that can be specified when training a model in this project.
      :rtype: list[TrainingConfigOptions]



   .. py:method:: create_train_test_data_split_feature_group(training_config, feature_group_ids)

      Get the train and test data split without training the model. Only supported for models with custom algorithms.

      :param training_config: The training config used to influence how the split is calculated.
      :type training_config: TrainingConfig
      :param feature_group_ids: List of feature group IDs provided by the user, including the required one for data split and others to influence how to split.
      :type feature_group_ids: List

      :returns: The feature group containing the training data and folds information.
      :rtype: FeatureGroup



   .. py:method:: train_model(name = None, training_config = None, feature_group_ids = None, refresh_schedule = None, custom_algorithms = None, custom_algorithms_only = False, custom_algorithm_configs = None, builtin_algorithms = None, cpu_size = None, memory = None, algorithm_training_configs = None)

      Create a new model and start its training in the given project.

      :param name: The name of the model. Defaults to "<Project Name> Model".
      :type name: str
      :param training_config: The training config used to train this model.
      :type training_config: TrainingConfig
      :param feature_group_ids: List of feature group IDs provided by the user to train the model on.
      :type feature_group_ids: List
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically retrain the created model.
      :type refresh_schedule: str
      :param custom_algorithms: List of user-defined algorithms to train. If not set, the default enabled custom algorithms will be used.
      :type custom_algorithms: list
      :param custom_algorithms_only: Whether to only run custom algorithms.
      :type custom_algorithms_only: bool
      :param custom_algorithm_configs: Configs for each user-defined algorithm; key is the algorithm name, value is the config serialized to JSON.
      :type custom_algorithm_configs: dict
      :param builtin_algorithms: List of algorithm names or algorithm IDs of the builtin algorithms provided by Abacus.AI to train. If not set, all applicable builtin algorithms will be used.
      :type builtin_algorithms: list
      :param cpu_size: Size of the CPU for the user-defined algorithms during training.
      :type cpu_size: str
      :param memory: Memory (in GB) for the user-defined algorithms during training.
      :type memory: int
      :param algorithm_training_configs: List of algorithm-specific training configs that will be part of the model training AutoML run.
      :type algorithm_training_configs: list

      :returns: The new model which is being trained.
      :rtype: Model
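
      A minimal training sketch with placeholder identifiers; an authenticated client is assumed:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials
         project = client.describe_project('PROJECT_ID')  # hypothetical project ID

         model = project.train_model(
             name='Churn Model',                      # hypothetical name
             feature_group_ids=['FEATURE_GROUP_ID'],  # hypothetical feature group
             refresh_schedule='0 6 * * 1',            # retrain Mondays at 06:00 UTC
         )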



   .. py:method:: create_model_from_python(function_source_code, train_function_name, training_input_tables, predict_function_name = None, predict_many_function_name = None, initialize_function_name = None, name = None, cpu_size = None, memory = None, training_config = None, exclusive_run = False, package_requirements = None, use_gpu = False, is_thread_safe = None)

      Initializes a new Model from user-provided Python code. If a list of input feature groups is supplied, they will be provided as arguments to the train and predict functions with the materialized feature groups for those input feature groups.

      This method expects `functionSourceCode` to be a valid language source file which contains the functions named `trainFunctionName` and `predictFunctionName`. `trainFunctionName` returns the trained model that is stored as the ModelVersion, while `predictFunctionName` has no well-defined return type, as it returns the prediction it makes, which can be anything.


      :param function_source_code: Contents of a valid Python source code file. The source code should contain the functions named `trainFunctionName` and `predictFunctionName`. A list of allowed import and system libraries for each language is specified in the user functions documentation section.
      :type function_source_code: str
      :param train_function_name: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
      :type train_function_name: str
      :param training_input_tables: List of feature groups that are supplied to the train function as parameters. Each of the parameters are materialized Dataframes (same type as the functions return value).
      :type training_input_tables: list
      :param predict_function_name: Name of the function found in the source code that will be executed to run predictions through the model. It is not executed when this function is run.
      :type predict_function_name: str
      :param predict_many_function_name: Name of the function found in the source code that will be executed for batch prediction of the model. It is not executed when this function is run.
      :type predict_many_function_name: str
      :param initialize_function_name: Name of the function found in the source code to initialize the trained model before using it to make predictions using the model
      :type initialize_function_name: str
      :param name: The name you want your model to have. Defaults to "<Project Name> Model"
      :type name: str
      :param cpu_size: Size of the CPU for the model training function
      :type cpu_size: str
      :param memory: Memory (in GB) for the model training function
      :type memory: int
      :param training_config: Training configuration
      :type training_config: TrainingConfig
      :param exclusive_run: Decides if this model will be run exclusively or along with other Abacus.AI algorithms
      :type exclusive_run: bool
      :param package_requirements: List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0']
      :type package_requirements: list
      :param use_gpu: Whether this model needs gpu
      :type use_gpu: bool
      :param is_thread_safe: Whether this model is thread safe
      :type is_thread_safe: bool

      :returns: The new model, which has not been trained.
      :rtype: Model
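
      An illustrative sketch with hypothetical source code, table names, and identifiers; an authenticated client is assumed:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient(api_key='YOUR_API_KEY')  # hypothetical credentials
         project = client.describe_project('PROJECT_ID')  # hypothetical project ID

         # Hypothetical source code with a train and a predict function.
         source_code = '''
         def train(customers_df):
             # customers_df is the materialized 'customers' feature group
             return {'mean_spend': customers_df['spend'].mean()}

         def predict(model, query):
             return {'predicted_spend': model['mean_spend']}
         '''

         model = project.create_model_from_python(
             function_source_code=source_code,
             train_function_name='train',
             training_input_tables=['customers'],
             predict_function_name='predict',
             memory=16,
         )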



   .. py:method:: list_models()

      Retrieves the list of models in the specified project.

      :param project_id: Unique string identifier associated with the project.
      :type project_id: str

      :returns: A list of models.
      :rtype: list[Model]



   .. py:method:: get_custom_train_function_info(feature_group_names_for_training = None, training_data_parameter_name_override = None, training_config = None, custom_algorithm_config = None)

      Returns information about how to call the custom train function.

      :param feature_group_names_for_training: A list of feature group table names to be used for training.
      :type feature_group_names_for_training: list
      :param training_data_parameter_name_override: Override from feature group type to parameter name in the train function.
      :type training_data_parameter_name_override: dict
      :param training_config: Training config for the options supported by the Abacus.AI platform.
      :type training_config: TrainingConfig
      :param custom_algorithm_config: User-defined config that can be serialized by JSON.
      :type custom_algorithm_config: dict

      :returns: Information about how to call the customer-provided train function.
      :rtype: CustomTrainFunctionInfo



   .. py:method:: create_model_monitor(prediction_feature_group_id, training_feature_group_id = None, name = None, refresh_schedule = None, target_value = None, target_value_bias = None, target_value_performance = None, feature_mappings = None, model_id = None, training_feature_mappings = None, feature_group_base_monitor_config = None, feature_group_comparison_monitor_config = None, exclude_interactive_performance_analysis = True, exclude_bias_analysis = None, exclude_performance_analysis = None, exclude_feature_drift_analysis = None, exclude_data_integrity_analysis = None)

      Runs a model monitor for the specified project.

      :param prediction_feature_group_id: The unique ID of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: The unique ID of the training data feature group.
      :type training_feature_group_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically retrain the created model monitor.
      :type refresh_schedule: str
      :param target_value: A target positive value for the label to compute bias and PR/AUC for performance page.
      :type target_value: str
      :param target_value_bias: A target positive value for the label to compute bias.
      :type target_value_bias: str
      :param target_value_performance: A target positive value for the label to compute PR curve/AUC for performance page.
      :type target_value_performance: str
      :param feature_mappings: A JSON map to override features for prediction_feature_group, where keys are column names and the values are feature data use types.
      :type feature_mappings: dict
      :param model_id: The unique ID of the model.
      :type model_id: str
      :param training_feature_mappings: A JSON map to override features for training_feature_group, where keys are column names and the values are feature data use types.
      :type training_feature_mappings: dict
      :param feature_group_base_monitor_config: Selection strategy for the base feature group, with the feature group version if selected.
      :type feature_group_base_monitor_config: dict
      :param feature_group_comparison_monitor_config: Selection strategy for the comparison feature group, with the feature group version if selected.
      :type feature_group_comparison_monitor_config: dict
      :param exclude_interactive_performance_analysis: Whether to exclude interactive performance analysis. Defaults to True if not provided.
      :type exclude_interactive_performance_analysis: bool
      :param exclude_bias_analysis: Whether to exclude bias analysis in the model monitor. By default, bias analysis is included.
      :type exclude_bias_analysis: bool
      :param exclude_performance_analysis: Whether to exclude performance analysis in the model monitor. By default, performance analysis is included.
      :type exclude_performance_analysis: bool
      :param exclude_feature_drift_analysis: Whether to exclude feature drift analysis in the model monitor. By default, feature drift analysis is included.
      :type exclude_feature_drift_analysis: bool
      :param exclude_data_integrity_analysis: Whether to exclude data integrity analysis in the model monitor. By default, data integrity analysis is included.
      :type exclude_data_integrity_analysis: bool

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor
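
      A minimal usage sketch; the API key, project ID, and feature group IDs are illustrative placeholders, and ``project`` is assumed to be obtained via ``ApiClient.describe_project``:

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # placeholder API key
         project = client.describe_project('your_project_id')  # placeholder project ID

         # Monitor drift between the training data and incoming prediction data,
         # rerunning the analysis every day at midnight UTC.
         monitor = project.create_model_monitor(
             prediction_feature_group_id='prediction_fg_id',
             training_feature_group_id='training_fg_id',
             name='Churn Model Monitor',
             refresh_schedule='0 0 * * *',
         )
         print(monitor.to_dict())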



   .. py:method:: list_model_monitors(limit = None)

      Retrieves the list of model monitors in the specified project.

      :param limit: Maximum number of model monitors to return. An internal default limit is applied if not set.
      :type limit: int

      :returns: A list of model monitors.
      :rtype: list[ModelMonitor]



   .. py:method:: create_vision_drift_monitor(prediction_feature_group_id, training_feature_group_id, name, feature_mappings, training_feature_mappings, target_value_performance = None, refresh_schedule = None)

      Runs a vision drift monitor for the specified project.

      :param prediction_feature_group_id: Unique string identifier of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: Unique string identifier of the training data feature group.
      :type training_feature_group_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param feature_mappings: A JSON map to override features for prediction_feature_group, where keys are column names and the values are feature data use types.
      :type feature_mappings: dict
      :param training_feature_mappings: A JSON map to override features for training_feature_group, where keys are column names and the values are feature data use types.
      :type training_feature_mappings: dict
      :param target_value_performance: A target positive value for the label to compute precision-recall curve/area under curve for performance page.
      :type target_value_performance: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically rerun the created vision drift monitor.
      :type refresh_schedule: str

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor



   .. py:method:: create_nlp_drift_monitor(prediction_feature_group_id, training_feature_group_id, name, feature_mappings, training_feature_mappings, target_value_performance = None, refresh_schedule = None)

      Runs an NLP drift monitor for the specified project.

      :param prediction_feature_group_id: Unique string identifier of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: Unique string identifier of the training data feature group.
      :type training_feature_group_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param feature_mappings: A JSON map to override features for prediction_feature_group, where keys are column names and the values are feature data use types.
      :type feature_mappings: dict
      :param training_feature_mappings: A JSON map to override features for training_feature_group, where keys are column names and the values are feature data use types.
      :type training_feature_mappings: dict
      :param target_value_performance: A target positive value for the label to compute precision-recall curve/area under curve for performance page.
      :type target_value_performance: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically rerun the created nlp drift monitor.
      :type refresh_schedule: str

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor



   .. py:method:: create_forecasting_monitor(name, prediction_feature_group_id, training_feature_group_id, training_forecast_config, prediction_forecast_config, forecast_frequency, refresh_schedule = None)

      Runs a forecasting monitor for the specified project.

      :param name: The name you want your model monitor to have. Defaults to "<Project Name> Model Monitor".
      :type name: str
      :param prediction_feature_group_id: Unique string identifier of the prediction data feature group.
      :type prediction_feature_group_id: str
      :param training_feature_group_id: Unique string identifier of the training data feature group.
      :type training_feature_group_id: str
      :param training_forecast_config: The configuration for the training data.
      :type training_forecast_config: ForecastingMonitorConfig
      :param prediction_forecast_config: The configuration for the prediction data.
      :type prediction_forecast_config: ForecastingMonitorConfig
      :param forecast_frequency: The frequency of the forecast. Defaults to the frequency of the prediction data.
      :type forecast_frequency: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically rerun the created forecasting monitor.
      :type refresh_schedule: str

      :returns: The new model monitor that was created.
      :rtype: ModelMonitor



   .. py:method:: create_eda(feature_group_id, name, refresh_schedule = None, include_collinearity = False, include_data_consistency = False, collinearity_keys = None, primary_keys = None, data_consistency_test_config = None, data_consistency_reference_config = None, feature_mappings = None, forecast_frequency = None)

      Run an Exploratory Data Analysis (EDA) for the specified project.

      :param feature_group_id: The unique ID of the prediction data feature group.
      :type feature_group_id: str
      :param name: The name you want your model monitor to have. Defaults to "<Project Name> EDA".
      :type name: str
      :param refresh_schedule: A cron-style string that describes a schedule in UTC to automatically rerun the created EDA.
      :type refresh_schedule: str
      :param include_collinearity: Set to True if the EDA type is collinearity.
      :type include_collinearity: bool
      :param include_data_consistency: Set to True if the EDA type is data consistency.
      :type include_data_consistency: bool
      :param collinearity_keys: List of features to use for collinearity
      :type collinearity_keys: list
      :param primary_keys: List of features that correspond to the primary keys or item IDs of the given feature group, for Data Consistency analysis or Forecasting analysis respectively.
      :type primary_keys: list
      :param data_consistency_test_config: Test feature group version selection strategy for Data Consistency EDA type.
      :type data_consistency_test_config: dict
      :param data_consistency_reference_config: Reference feature group version selection strategy for Data Consistency EDA type.
      :type data_consistency_reference_config: dict
      :param feature_mappings: A JSON map to override features for the given feature_group, where keys are column names and the values are feature data use types. (In forecasting, used to set the timestamp column and target value)
      :type feature_mappings: dict
      :param forecast_frequency: The frequency of the data. It can be either HOURLY, DAILY, WEEKLY, MONTHLY, QUARTERLY, YEARLY.
      :type forecast_frequency: str

      :returns: The new EDA object that was created.
      :rtype: Eda
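
      For example, a collinearity-focused EDA might be created as follows (a sketch; the feature group ID and column names are illustrative placeholders, and ``project`` is assumed to come from ``ApiClient.describe_project``):

      .. code-block:: python

         from abacusai import ApiClient

         project = ApiClient('YOUR_API_KEY').describe_project('your_project_id')  # placeholders

         eda = project.create_eda(
             feature_group_id='feature_group_id',
             name='Transactions EDA',
             include_collinearity=True,
             collinearity_keys=['price', 'quantity', 'discount'],
         )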



   .. py:method:: list_eda()

      Retrieves the list of Exploratory Data Analysis (EDA) objects in the specified project.

      :param project_id: Unique string identifier associated with the project.
      :type project_id: str

      :returns: List of EDA objects.
      :rtype: list[Eda]



   .. py:method:: list_holdout_analysis(model_id = None)

      List holdout analyses for a project. Optionally, filter by model.

      :param model_id: (optional) ID of the model to filter by
      :type model_id: str

      :returns: The holdout analyses
      :rtype: list[HoldoutAnalysis]



   .. py:method:: create_monitor_alert(alert_name, condition_config, action_config, model_monitor_id = None, realtime_monitor_id = None)

      Creates a monitor alert for the given conditions and monitor. A monitor alert can be created for either a model monitor or a real-time monitor.

      :param alert_name: Name of the alert.
      :type alert_name: str
      :param condition_config: Condition to run the actions for the alert.
      :type condition_config: AlertConditionConfig
      :param action_config: Configuration for the action of the alert.
      :type action_config: AlertActionConfig
      :param model_monitor_id: Unique string identifier for the model monitor created under the project.
      :type model_monitor_id: str
      :param realtime_monitor_id: Unique string identifier for the real-time monitor for the deployment created under the project.
      :type realtime_monitor_id: str

      :returns: Object describing the monitor alert.
      :rtype: MonitorAlert



   .. py:method:: list_prediction_operators()

      List all the prediction operators inside a project.

      :param project_id: The unique ID of the project.
      :type project_id: str

      :returns: A list of prediction operator objects.
      :rtype: list[PredictionOperator]



   .. py:method:: create_deployment_token(name = None)

      Creates a deployment token for the specified project.

      Deployment tokens are used to authenticate requests to the prediction APIs and are scoped to the project level.


      :param name: The name of the deployment token.
      :type name: str

      :returns: The deployment token.
      :rtype: DeploymentAuthToken
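
      A minimal sketch (the API key and project ID are placeholders, and ``project`` is assumed to come from ``ApiClient.describe_project``):

      .. code-block:: python

         from abacusai import ApiClient

         project = ApiClient('YOUR_API_KEY').describe_project('your_project_id')  # placeholders

         # Create a project-scoped token for authenticating prediction API requests.
         token = project.create_deployment_token(name='production-dashboard')
         print(token.to_dict())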



   .. py:method:: list_deployments()

      Retrieves a list of all deployments in the specified project.

      :param project_id: The unique identifier associated with the project.
      :type project_id: str

      :returns: An array of deployments.
      :rtype: list[Deployment]



   .. py:method:: list_deployment_tokens()

      Retrieves a list of all deployment tokens associated with the specified project.

      :param project_id: The unique ID associated with the project.
      :type project_id: str

      :returns: A list of deployment tokens.
      :rtype: list[DeploymentAuthToken]



   .. py:method:: list_realtime_monitors()

      List the real-time monitors for the deployments in the specified project.

      :param project_id: Unique string identifier for the project.
      :type project_id: str

      :returns: An array of real-time monitors.
      :rtype: list[RealtimeMonitor]



   .. py:method:: list_refresh_policies(dataset_ids = [], feature_group_id = None, model_ids = [], deployment_ids = [], batch_prediction_ids = [], model_monitor_ids = [], notebook_ids = [])

      List the refresh policies for the organization. If no filters are specified, all refresh policies are returned.

      :param dataset_ids: Comma-separated list of Dataset IDs.
      :type dataset_ids: List
      :param feature_group_id: Feature Group ID for which we wish to see the refresh policies attached.
      :type feature_group_id: str
      :param model_ids: Comma-separated list of Model IDs.
      :type model_ids: List
      :param deployment_ids: Comma-separated list of Deployment IDs.
      :type deployment_ids: List
      :param batch_prediction_ids: Comma-separated list of Batch Prediction IDs.
      :type batch_prediction_ids: List
      :param model_monitor_ids: Comma-separated list of Model Monitor IDs.
      :type model_monitor_ids: List
      :param notebook_ids: Comma-separated list of Notebook IDs.
      :type notebook_ids: List

      :returns: List of all refresh policies in the organization.
      :rtype: list[RefreshPolicy]
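
      For example, to list only the policies attached to particular datasets and models (a sketch; all IDs are placeholders and ``project`` is assumed to come from ``ApiClient.describe_project``):

      .. code-block:: python

         from abacusai import ApiClient

         project = ApiClient('YOUR_API_KEY').describe_project('your_project_id')  # placeholders

         policies = project.list_refresh_policies(
             dataset_ids=['dataset_id_1'],
             model_ids=['model_id_1'],
         )
         for policy in policies:
             print(policy.refresh_policy_id, policy.cron)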



   .. py:method:: list_batch_predictions(limit = None)

      Retrieves a list of batch predictions in the project.

      :param limit: Maximum number of batch predictions to return. An internal default limit is applied if not set.
      :type limit: int

      :returns: List of batch prediction jobs.
      :rtype: list[BatchPrediction]



   .. py:method:: list_pipelines()

      Lists the pipelines for an organization or a project.

      :param project_id: Unique string identifier for the project to list pipelines from.
      :type project_id: str

      :returns: A list of pipelines.
      :rtype: list[Pipeline]



   .. py:method:: create_graph_dashboard(name, python_function_ids = None)

      Creates a plot dashboard from the selected Python plot functions.

      :param name: The name of the dashboard.
      :type name: str
      :param python_function_ids: A list of unique string identifiers for the python functions to be used in the graph dashboard.
      :type python_function_ids: List

      :returns: An object describing the graph dashboard.
      :rtype: GraphDashboard
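
      A minimal sketch (the Python function IDs are placeholders, and ``project`` is assumed to come from ``ApiClient.describe_project``):

      .. code-block:: python

         from abacusai import ApiClient

         project = ApiClient('YOUR_API_KEY').describe_project('your_project_id')  # placeholders

         dashboard = project.create_graph_dashboard(
             name='Sales Plots',
             python_function_ids=['python_function_id_1', 'python_function_id_2'],
         )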



   .. py:method:: list_graph_dashboards()

      Lists the graph dashboards for a project

      :param project_id: Unique string identifier for the project to list graph dashboards from.
      :type project_id: str

      :returns: A list of graph dashboards.
      :rtype: list[GraphDashboard]



   .. py:method:: list_builtin_algorithms(feature_group_ids, training_config = None)

      Return list of built-in algorithms based on given input data and training config.

      :param feature_group_ids: List of feature group IDs specifying input data.
      :type feature_group_ids: List
      :param training_config: The training config to be used for model training.
      :type training_config: TrainingConfig

      :returns: List of applicable builtin algorithms.
      :rtype: list[Algorithm]



   .. py:method:: create_chat_session(name = None)

      Creates a chat session with Data Science Co-pilot.

      :param name: The name of the chat session. Defaults to the project name.
      :type name: str

      :returns: The chat session with Data Science Co-pilot
      :rtype: ChatSession
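
      A minimal sketch (the API key and project ID are placeholders):

      .. code-block:: python

         from abacusai import ApiClient

         project = ApiClient('YOUR_API_KEY').describe_project('your_project_id')  # placeholders

         # The session name defaults to the project name when omitted.
         session = project.create_chat_session(name='Feature brainstorming')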



   .. py:method:: create_agent(function_source_code = None, agent_function_name = None, name = None, memory = None, package_requirements = [], description = None, enable_binary_input = False, evaluation_feature_group_id = None, agent_input_schema = None, agent_output_schema = None, workflow_graph = None, agent_interface = AgentInterface.DEFAULT, included_modules = None, org_level_connectors = None, user_level_connectors = None, initialize_function_name = None, initialize_function_code = None)

      Creates a new AI agent using the given agent workflow graph definition.

      :param name: The name you want your agent to have, defaults to "<Project Name> Agent".
      :type name: str
      :param memory: Overrides the default memory allocation (in GB) for the agent.
      :type memory: int
      :param package_requirements: A list of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
      :type package_requirements: list
      :param description: A description of the agent, including its purpose and instructions.
      :type description: str
      :param evaluation_feature_group_id: The ID of the feature group to use for evaluation.
      :type evaluation_feature_group_id: str
      :param workflow_graph: The workflow graph for the agent.
      :type workflow_graph: WorkflowGraph
      :param agent_interface: The interface that the agent will be deployed with.
      :type agent_interface: AgentInterface
      :param included_modules: A list of user created custom modules to include in the agent's environment.
      :type included_modules: List
      :param org_level_connectors: A list of org level connector ids to be used by the agent.
      :type org_level_connectors: List
      :param user_level_connectors: A dictionary mapping ApplicationConnectorType keys to lists of OAuth scopes. Each key represents a specific user level application connector, while the value is a list of scopes that define the permissions granted to the application.
      :type user_level_connectors: Dict
      :param initialize_function_name: The name of the function to be used for initialization.
      :type initialize_function_name: str
      :param initialize_function_code: The function code to be used for initialization.
      :type initialize_function_code: str

      :returns: The new agent.
      :rtype: Agent



   .. py:method:: generate_agent_code(prompt, fast_mode = None)

      Generates the code for defining an AI Agent

      :param prompt: A natural language prompt describing the agent specification: what the agent will do, what inputs it will expect, and what outputs it will produce.
      :type prompt: str
      :param fast_mode: If True, runs a faster but slightly less accurate code generation pipeline
      :type fast_mode: bool



   .. py:method:: list_agents()

      Retrieves the list of agents in the specified project.

      :param project_id: The unique identifier associated with the project.
      :type project_id: str

      :returns: A list of agents in the project.
      :rtype: list[Agent]



   .. py:method:: create_document_retriever(name, feature_group_id, document_retriever_config = None)

      Returns a document retriever that stores embeddings for document chunks in a feature group.

      Document columns in the feature group are broken into chunks. For cases with multiple document columns, chunks from all columns are combined together to form a single chunk.


      :param name: The name of the Document Retriever. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
      :type name: str
      :param feature_group_id: The ID of the feature group that the Document Retriever is associated with.
      :type feature_group_id: str
      :param document_retriever_config: The configuration, including chunk_size and chunk_overlap_fraction, for document retrieval.
      :type document_retriever_config: VectorStoreConfig

      :returns: The newly created document retriever.
      :rtype: DocumentRetriever
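
      A minimal sketch, assuming ``VectorStoreConfig`` (importable from ``abacusai.api_class``) accepts the ``chunk_size`` and ``chunk_overlap_fraction`` settings mentioned above; the feature group ID is a placeholder:

      .. code-block:: python

         from abacusai import ApiClient
         from abacusai.api_class import VectorStoreConfig

         project = ApiClient('YOUR_API_KEY').describe_project('your_project_id')  # placeholders

         retriever = project.create_document_retriever(
             name='support_articles_retriever',
             feature_group_id='feature_group_id',
             document_retriever_config=VectorStoreConfig(
                 chunk_size=512,
                 chunk_overlap_fraction=0.1,
             ),
         )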



   .. py:method:: list_document_retrievers(limit = 100, start_after_id = None)

      List all the document retrievers.

      :param limit: The number of document retrievers to return.
      :type limit: int
      :param start_after_id: An offset parameter to exclude all document retrievers up to this specified ID.
      :type start_after_id: str

      :returns: All the document retrievers associated with the specified project.
      :rtype: list[DocumentRetriever]



   .. py:method:: create_model_from_functions(train_function, predict_function = None, training_input_tables = None, predict_many_function = None, initialize_function = None, cpu_size = None, memory = None, training_config = None, exclusive_run = False)

      Creates a model using Python.

      :param train_function: The function to be executed to train the model.
      :type train_function: callable
      :param predict_function: The function to be executed to run a single prediction.
      :type predict_function: callable
      :param training_input_tables: The input tables to be used for training the model. Defaults to None.
      :type training_input_tables: list
      :param predict_many_function: Prediction function for batch input
      :type predict_many_function: callable
      :param cpu_size: Size of the CPU allocated to the model's functions
      :type cpu_size: str
      :param memory: Memory (in GB) allocated to the model's functions
      :type memory: int

      :returns: The model object.
      :rtype: Model
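
      A simplified sketch; the exact arguments passed to the train and predict functions depend on the training input tables and training config, so the signatures below are illustrative only:

      .. code-block:: python

         from abacusai import ApiClient

         project = ApiClient('YOUR_API_KEY').describe_project('your_project_id')  # placeholders


         def train(training_df):
             # The platform materializes the tables in `training_input_tables`
             # and passes them to this function as pandas DataFrames.
             return {'mean_target': training_df['target'].mean()}


         def predict(model, query):
             # `model` is whatever the train function returned.
             return {'prediction': model['mean_target']}


         model = project.create_model_from_functions(
             train_function=train,
             predict_function=predict,
             training_input_tables=['training_table_name'],
         )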



.. py:class:: ProjectConfig(client, type=None, config={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Project-specific config for a feature group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param type: Type of project config
   :type type: str
   :param config: Project-specific config for this feature group
   :type config: ProjectFeatureGroupConfig


   .. py:attribute:: type
      :value: None



   .. py:attribute:: config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ProjectFeatureGroup(client, featureGroupId=None, modificationLock=None, name=None, featureGroupSourceType=None, tableName=None, sql=None, datasetId=None, functionSourceCode=None, functionName=None, sourceTables=None, createdAt=None, description=None, sqlError=None, latestVersionOutdated=None, referencedFeatureGroups=None, tags=None, primaryKey=None, updateTimestampKey=None, lookupKeys=None, streamingEnabled=None, incremental=None, mergeConfig=None, samplingConfig=None, cpuSize=None, memory=None, streamingReady=None, featureTags=None, moduleName=None, templateBindings=None, featureExpression=None, useOriginalCsvNames=None, pythonFunctionBindings=None, pythonFunctionName=None, useGpu=None, versionLimit=None, exportOnMaterialization=None, featureGroupType=None, features={}, duplicateFeatures={}, pointInTimeGroups={}, annotationConfig={}, concatenationConfig={}, indexingConfig={}, codeSource={}, featureGroupTemplate={}, explanation={}, refreshSchedules={}, exportConnectorConfig={}, projectFeatureGroupSchema={}, projectConfig={}, latestFeatureGroupVersion={}, operatorConfig={})

   Bases: :py:obj:`abacusai.feature_group.FeatureGroup`


   A feature group along with project specific mappings

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupId: Unique identifier for this feature group.
   :type featureGroupId: str
   :param modificationLock: Whether the feature group is locked against modification.
   :type modificationLock: bool
   :param name: Name of the feature group.
   :type name: str
   :param featureGroupSourceType: The source type of the feature group
   :type featureGroupSourceType: str
   :param tableName: Unique table name of this feature group.
   :type tableName: str
   :param sql: SQL definition creating this feature group.
   :type sql: str
   :param datasetId: Dataset ID the feature group is sourced from.
   :type datasetId: str
   :param functionSourceCode: Source definition creating this feature group.
   :type functionSourceCode: str
   :param functionName: Function name to execute from the source code.
   :type functionName: str
   :param sourceTables: Source tables for this feature group.
   :type sourceTables: list[str]
   :param createdAt: Timestamp at which the feature group was created.
   :type createdAt: str
   :param description: Description of the feature group.
   :type description: str
   :param sqlError: Error message with this feature group.
   :type sqlError: str
   :param latestVersionOutdated: Whether the latest materialized feature group version is outdated.
   :type latestVersionOutdated: bool
   :param referencedFeatureGroups: Feature groups this feature group is used in.
   :type referencedFeatureGroups: list[str]
   :param tags: Tags added to this feature group.
   :type tags: list[str]
   :param primaryKey: Primary index feature.
   :type primaryKey: str
   :param updateTimestampKey: Primary timestamp feature.
   :type updateTimestampKey: str
   :param lookupKeys: Additional indexed features for this feature group.
   :type lookupKeys: list[str]
   :param streamingEnabled: If true, the feature group can have data streamed to it.
   :type streamingEnabled: bool
   :param incremental: If feature group corresponds to an incremental dataset.
   :type incremental: bool
   :param mergeConfig: Merge configuration settings for the feature group.
   :type mergeConfig: dict
   :param samplingConfig: Sampling configuration for the feature group.
   :type samplingConfig: dict
   :param cpuSize: CPU size specified for the Python feature group.
   :type cpuSize: str
   :param memory: Memory in GB specified for the Python feature group.
   :type memory: int
   :param streamingReady: If true, the feature group is ready to receive streaming data.
   :type streamingReady: bool
   :param featureTags: Tags for features in this feature group
   :type featureTags: dict
   :param moduleName: Path to the file with the feature group function.
   :type moduleName: str
   :param templateBindings: Config specifying variable names and values to use when resolving a feature group template.
   :type templateBindings: dict
   :param featureExpression: If the dataset feature group has custom features, the SQL select expression creating those features.
   :type featureExpression: str
   :param useOriginalCsvNames: If true, the feature group will use the original column names in the source dataset.
   :type useOriginalCsvNames: bool
   :param pythonFunctionBindings: Config specifying variable names, types, and values to use when resolving a Python feature group.
   :type pythonFunctionBindings: dict
   :param pythonFunctionName: Name of the Python function the feature group was built from.
   :type pythonFunctionName: str
   :param useGpu: Whether this feature group is using gpu
   :type useGpu: bool
   :param versionLimit: Version limit for the feature group.
   :type versionLimit: int
   :param exportOnMaterialization: Whether to export the feature group on materialization.
   :type exportOnMaterialization: bool
   :param featureGroupType: Project type when the feature group is used in the context of a project.
   :type featureGroupType: str
   :param features: List of resolved features.
   :type features: Feature
   :param duplicateFeatures: List of duplicate features.
   :type duplicateFeatures: Feature
   :param pointInTimeGroups: List of Point In Time Groups.
   :type pointInTimeGroups: PointInTimeGroup
   :param annotationConfig: Annotation config for this feature group
   :type annotationConfig: AnnotationConfig
   :param latestFeatureGroupVersion: Latest feature group version.
   :type latestFeatureGroupVersion: FeatureGroupVersion
   :param concatenationConfig: Feature group ID whose data will be concatenated into this feature group.
   :type concatenationConfig: ConcatenationConfig
   :param indexingConfig: Indexing config for the feature group for feature store
   :type indexingConfig: IndexingConfig
   :param codeSource: If a Python feature group, information on the source code.
   :type codeSource: CodeSource
   :param featureGroupTemplate: FeatureGroupTemplate to use when this feature group is attached to a template.
   :type featureGroupTemplate: FeatureGroupTemplate
   :param explanation: Natural language explanation of the feature group
   :type explanation: NaturalLanguageExplanation
   :param refreshSchedules: List of schedules that determines when the next version of the feature group will be created.
   :type refreshSchedules: RefreshSchedule
   :param exportConnectorConfig: The export config (file connector or database connector information) for feature group exports.
   :type exportConnectorConfig: FeatureGroupRefreshExportConfig
   :param projectFeatureGroupSchema: Project-specific schema for this feature group.
   :type projectFeatureGroupSchema: ProjectFeatureGroupSchema
   :param projectConfig: Project-specific config for this feature group.
   :type projectConfig: ProjectConfig
   :param operatorConfig: Operator configuration settings for the feature group.
   :type operatorConfig: OperatorConfig


   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: modification_lock
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: feature_group_source_type
      :value: None



   .. py:attribute:: table_name
      :value: None



   .. py:attribute:: sql
      :value: None



   .. py:attribute:: dataset_id
      :value: None



   .. py:attribute:: function_source_code
      :value: None



   .. py:attribute:: function_name
      :value: None



   .. py:attribute:: source_tables
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: sql_error
      :value: None



   .. py:attribute:: latest_version_outdated
      :value: None



   .. py:attribute:: referenced_feature_groups
      :value: None



   .. py:attribute:: tags
      :value: None



   .. py:attribute:: primary_key
      :value: None



   .. py:attribute:: update_timestamp_key
      :value: None



   .. py:attribute:: lookup_keys
      :value: None



   .. py:attribute:: streaming_enabled
      :value: None



   .. py:attribute:: incremental
      :value: None



   .. py:attribute:: merge_config
      :value: None



   .. py:attribute:: sampling_config
      :value: None



   .. py:attribute:: cpu_size
      :value: None



   .. py:attribute:: memory
      :value: None



   .. py:attribute:: streaming_ready
      :value: None



   .. py:attribute:: feature_tags
      :value: None



   .. py:attribute:: module_name
      :value: None



   .. py:attribute:: template_bindings
      :value: None



   .. py:attribute:: feature_expression
      :value: None



   .. py:attribute:: use_original_csv_names
      :value: None



   .. py:attribute:: python_function_bindings
      :value: None



   .. py:attribute:: python_function_name
      :value: None



   .. py:attribute:: use_gpu
      :value: None



   .. py:attribute:: version_limit
      :value: None



   .. py:attribute:: export_on_materialization
      :value: None



   .. py:attribute:: feature_group_type
      :value: None



   .. py:attribute:: features


   .. py:attribute:: duplicate_features


   .. py:attribute:: point_in_time_groups


   .. py:attribute:: annotation_config


   .. py:attribute:: concatenation_config


   .. py:attribute:: indexing_config


   .. py:attribute:: code_source


   .. py:attribute:: feature_group_template


   .. py:attribute:: explanation


   .. py:attribute:: refresh_schedules


   .. py:attribute:: export_connector_config


   .. py:attribute:: project_feature_group_schema


   .. py:attribute:: project_config


   .. py:attribute:: latest_feature_group_version


   .. py:attribute:: operator_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ProjectFeatureGroupSchema(client, nestedSchema=None, schema={}, duplicateFeatures={}, projectConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A schema description for a project feature group

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param nestedSchema: List of schemas for nested features
   :type nestedSchema: list
   :param schema: List of schema descriptions for the features
   :type schema: Schema
   :param duplicateFeatures: List of duplicate feature schemas
   :type duplicateFeatures: Schema
   :param projectConfig: Project-specific config for this feature group.
   :type projectConfig: ProjectConfig


   .. py:attribute:: nested_schema
      :value: None



   .. py:attribute:: schema


   .. py:attribute:: duplicate_features


   .. py:attribute:: project_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ProjectFeatureGroupSchemaVersion(client, schemaVersion=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A version of a schema

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param schemaVersion: The unique identifier of a schema version.
   :type schemaVersion: id


   .. py:attribute:: schema_version
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ProjectValidation(client, valid=None, datasetErrors=None, columnHints=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A validation result for a project

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param valid: `true` if the project is valid and ready to be trained, otherwise `false`.
   :type valid: bool
   :param datasetErrors: A list of errors keeping the dataset from being valid
   :type datasetErrors: list[dict]
   :param columnHints: Hints for what to set on the columns
   :type columnHints: dict


   .. py:attribute:: valid
      :value: None



   .. py:attribute:: dataset_errors
      :value: None



   .. py:attribute:: column_hints
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: PythonFunction(client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, outputVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, packageRequirements=None, description=None, examples=None, connectors=None, configurations=None, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Customer created python function

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param notebookId: The unique identifier of the notebook used to spin up the notebook upon creation.
   :type notebookId: str
   :param name: The name to identify the algorithm, only uppercase letters, numbers, and underscores allowed (i.e. it must be a valid Python identifier)
   :type name: str
   :param createdAt: The ISO-8601 string representing when the Python function was created.
   :type createdAt: str
   :param functionVariableMappings: A description of the function variables.
   :type functionVariableMappings: dict
   :param outputVariableMappings: A description of the variables returned by the function
   :type outputVariableMappings: dict
   :param functionName: The name of the Python function to be used.
   :type functionName: str
   :param pythonFunctionId: The unique identifier of the Python function.
   :type pythonFunctionId: str
   :param functionType: The type of the Python function.
   :type functionType: str
   :param packageRequirements: The pip package dependencies required to run the code
   :type packageRequirements: list
   :param description: Description of the Python function.
   :type description: str
   :param examples: Dictionary containing example use cases and anti-patterns. Includes 'positive' examples showing recommended usage and 'negative' examples showing cases to avoid.
   :type examples: dict[str, list[str]]
   :param connectors: Dictionary containing user-level and organization-level connectors
   :type connectors: dict
   :param configurations: Dictionary containing configurations for the Python function
   :type configurations: dict
   :param codeSource: Information about the source code of the Python function.
   :type codeSource: CodeSource


   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: function_variable_mappings
      :value: None



   .. py:attribute:: output_variable_mappings
      :value: None



   .. py:attribute:: function_name
      :value: None



   .. py:attribute:: python_function_id
      :value: None



   .. py:attribute:: function_type
      :value: None



   .. py:attribute:: package_requirements
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: examples
      :value: None



   .. py:attribute:: connectors
      :value: None



   .. py:attribute:: configurations
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: add_graph_to_dashboard(graph_dashboard_id, function_variable_mappings = None, name = None)

      Add a python plot function to a dashboard

      :param graph_dashboard_id: Unique string identifier for the graph dashboard to update.
      :type graph_dashboard_id: str
      :param function_variable_mappings: List of arguments to be supplied to the function as parameters, in the format [{'name': 'function_argument', 'variable_type': 'FEATURE_GROUP', 'value': 'name_of_feature_group'}].
      :type function_variable_mappings: List
      :param name: Name of the added python plot
      :type name: str

      :returns: An object describing the graph dashboard.
      :rtype: GraphDashboard
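
      A minimal sketch, assuming the function object was retrieved with ``client.describe_python_function`` and using the ``function_variable_mappings`` format shown above (all identifiers are placeholders):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # placeholder API key
         plot_function = client.describe_python_function('my_plot_function')  # placeholder name

         plot_function.add_graph_to_dashboard(
             graph_dashboard_id='graph_dashboard_id',
             function_variable_mappings=[{
                 'name': 'function_argument',
                 'variable_type': 'FEATURE_GROUP',
                 'value': 'name_of_feature_group',
             }],
             name='Revenue by region',
         )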



   .. py:method:: validate_locally(kwargs = None)

      Validates a Python function by running it with the given input values in a local environment. Input feature groups can be passed in ``kwargs`` either by table name (string) or as a pandas DataFrame.

      :param kwargs: A dictionary mapping function arguments to values to pass to the function. Feature group names will automatically be converted into pandas dataframes.
      :type kwargs: dict

      :returns: The result of executing the python function
      :rtype: any

      :raises TypeError: If an Input Feature Group argument has an invalid type or argument is missing.
      :raises Exception: If an error occurs while validating the Python function.
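
      A minimal sketch, assuming the function takes a single DataFrame argument named ``input_df`` (the function name and argument are placeholders):

      .. code-block:: python

         import pandas as pd

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # placeholder API key
         plot_function = client.describe_python_function('my_plot_function')  # placeholder name

         # Keys must match the function's argument names; a feature group can be
         # supplied either by table name (str) or directly as a pandas DataFrame.
         result = plot_function.validate_locally(kwargs={
             'input_df': pd.DataFrame({'x': [1, 2, 3], 'y': [4.0, 5.0, 6.0]}),
         })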



.. py:class:: PythonPlotFunction(client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, plotName=None, graphReferenceId=None, codeSource={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Create a Plot for a Dashboard

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param notebookId: Unique string identifier of the notebook used to spin up the notebook upon creation.
   :type notebookId: str
   :param name: The name used to identify the algorithm. Only uppercase letters, numbers, and underscores are allowed.
   :type name: str
   :param createdAt: Date and time when the Python function was created, in ISO-8601 format.
   :type createdAt: str
   :param functionVariableMappings: The mappings for function parameters' names.
   :type functionVariableMappings: dict
   :param functionName: The name of the Python function to be used.
   :type functionName: str
   :param pythonFunctionId: Unique string identifier of the Python function.
   :type pythonFunctionId: str
   :param functionType: The type of the Python function.
   :type functionType: str
   :param plotName: Name of the plot.
   :type plotName: str
   :param graphReferenceId: Reference ID of the dashboard to the plot.
   :type graphReferenceId: str
   :param codeSource: Info about the source code of the Python function.
   :type codeSource: CodeSource


   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: function_variable_mappings
      :value: None



   .. py:attribute:: function_name
      :value: None



   .. py:attribute:: python_function_id
      :value: None



   .. py:attribute:: function_type
      :value: None



   .. py:attribute:: plot_name
      :value: None



   .. py:attribute:: graph_reference_id
      :value: None



   .. py:attribute:: code_source


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: RangeViolation(client, name=None, trainingMin=None, trainingMax=None, predictionMin=None, predictionMax=None, freqAboveTrainingRange=None, freqBelowTrainingRange=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Summary of important range mismatches for a numerical feature discovered by a model monitoring instance

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: Name of feature.
   :type name: str
   :param trainingMin: Minimum value of training distribution for the specified feature.
   :type trainingMin: float
   :param trainingMax: Maximum value of training distribution for the specified feature.
   :type trainingMax: float
   :param predictionMin: Minimum value of prediction distribution for the specified feature.
   :type predictionMin: float
   :param predictionMax: Maximum value of prediction distribution for the specified feature.
   :type predictionMax: float
   :param freqAboveTrainingRange: Frequency of prediction rows above the training maximum for the specified feature.
   :type freqAboveTrainingRange: float
   :param freqBelowTrainingRange: Frequency of prediction rows below the training minimum for the specified feature.
   :type freqBelowTrainingRange: float


   .. py:attribute:: name
      :value: None



   .. py:attribute:: training_min
      :value: None



   .. py:attribute:: training_max
      :value: None



   .. py:attribute:: prediction_min
      :value: None



   .. py:attribute:: prediction_max
      :value: None



   .. py:attribute:: freq_above_training_range
      :value: None



   .. py:attribute:: freq_below_training_range
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: RealtimeMonitor(client, realtimeMonitorId=None, name=None, createdAt=None, deploymentId=None, lookbackTime=None, realtimeMonitorSchedule=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A real-time monitor

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param realtimeMonitorId: The unique identifier of the real-time monitor.
   :type realtimeMonitorId: str
   :param name: The user-friendly name for the real-time monitor.
   :type name: str
   :param createdAt: Date and time at which the real-time monitor was created.
   :type createdAt: str
   :param deploymentId: Deployment ID that this real-time monitor is monitoring.
   :type deploymentId: str
   :param lookbackTime: The lookback time for the real-time monitor.
   :type lookbackTime: int
   :param realtimeMonitorSchedule: The drift computation schedule for the real-time monitor.
   :type realtimeMonitorSchedule: str


   .. py:attribute:: realtime_monitor_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: lookback_time
      :value: None



   .. py:attribute:: realtime_monitor_schedule
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: update(realtime_monitor_schedule = None, lookback_time = None)

      Update the real-time monitor associated with the real-time monitor id.

      :param realtime_monitor_schedule: The cron expression for triggering the monitor.
      :type realtime_monitor_schedule: str
      :param lookback_time: Lookback time (in seconds) for each monitor trigger
      :type lookback_time: float

      :returns: Object describing the realtime monitor.
      :rtype: RealtimeMonitor
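
      A minimal sketch, assuming the monitor was retrieved with ``client.describe_realtime_monitor`` (the ID is a placeholder):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # placeholder API key
         monitor = client.describe_realtime_monitor('realtime_monitor_id')  # placeholder ID

         # Compute drift hourly over the preceding hour (3600 seconds) of traffic.
         monitor = monitor.update(
             realtime_monitor_schedule='0 * * * *',
             lookback_time=3600,
         )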



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: RealtimeMonitor



   .. py:method:: describe()

      Get the real-time monitor associated with the real-time monitor id.

      :param realtime_monitor_id: Unique string identifier for the real-time monitor.
      :type realtime_monitor_id: str

      :returns: Object describing the real-time monitor.
      :rtype: RealtimeMonitor



   .. py:method:: delete()

      Delete the real-time monitor associated with the real-time monitor id.

      :param realtime_monitor_id: Unique string identifier for the real-time monitor.
      :type realtime_monitor_id: str



.. py:class:: RefreshPipelineRun(client, refreshPipelineRunId=None, refreshPolicyId=None, createdAt=None, startedAt=None, completedAt=None, status=None, refreshType=None, datasetVersions=None, featureGroupVersion=None, modelVersions=None, deploymentVersions=None, batchPredictions=None, refreshPolicy={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   This keeps track of the overall status of a refresh. A refresh can span multiple resources such as the creation of new dataset versions and the training of a new model version based on them.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param refreshPipelineRunId: The unique identifier for the refresh pipeline run.
   :type refreshPipelineRunId: str
   :param refreshPolicyId: Populated when the run was triggered by a refresh policy.
   :type refreshPolicyId: str
   :param createdAt: The time when this refresh pipeline run was created, in ISO-8601 format.
   :type createdAt: str
   :param startedAt: The time when the refresh pipeline run was started, in ISO-8601 format.
   :type startedAt: str
   :param completedAt: The time when the refresh pipeline run was completed, in ISO-8601 format.
   :type completedAt: str
   :param status: The status of the refresh pipeline run.
   :type status: str
   :param refreshType: The type of refresh policy to be run.
   :type refreshType: str
   :param datasetVersions: A list of dataset version IDs that this refresh pipeline run is monitoring.
   :type datasetVersions: list[str]
   :param featureGroupVersion: The feature group version ID that this refresh pipeline run is monitoring.
   :type featureGroupVersion: str
   :param modelVersions: A list of model version IDs that this refresh pipeline run is monitoring.
   :type modelVersions: list[str]
   :param deploymentVersions: A list of deployment version IDs that this refresh pipeline run is monitoring.
   :type deploymentVersions: list[str]
   :param batchPredictions: A list of batch prediction IDs that this refresh pipeline run is monitoring.
   :type batchPredictions: list[str]
   :param refreshPolicy: The refresh policy for this refresh policy run.
   :type refreshPolicy: RefreshPolicy


   .. py:attribute:: refresh_pipeline_run_id
      :value: None



   .. py:attribute:: refresh_policy_id
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: started_at
      :value: None



   .. py:attribute:: completed_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: refresh_type
      :value: None



   .. py:attribute:: dataset_versions
      :value: None



   .. py:attribute:: feature_group_version
      :value: None



   .. py:attribute:: model_versions
      :value: None



   .. py:attribute:: deployment_versions
      :value: None



   .. py:attribute:: batch_predictions
      :value: None



   .. py:attribute:: refresh_policy


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: RefreshPipelineRun



   .. py:method:: describe()

      Retrieve a single refresh pipeline run

      :param refresh_pipeline_run_id: Unique string identifier associated with the refresh pipeline run.
      :type refresh_pipeline_run_id: str

      :returns: A refresh pipeline run object.
      :rtype: RefreshPipelineRun



   .. py:method:: wait_for_complete(timeout=None)

      A waiting call until refresh pipeline run has completed.

      :param timeout: The maximum amount of time to wait for the refresh pipeline run to finish; if it does not finish within the allocated time, the call times out.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the refresh pipeline run.

      :returns: A string describing the status of a refresh pipeline run (pending, complete, etc.).
      :rtype: str
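
      A minimal sketch showing how runs can be inspected and awaited, assuming the policy was retrieved with ``client.describe_refresh_policy`` (IDs are placeholders):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # placeholder API key
         policy = client.describe_refresh_policy('refresh_policy_id')  # placeholder ID

         runs = policy.list_refresh_pipeline_runs()
         for run in runs:
             print(run.refresh_pipeline_run_id, run.get_status())

         # Block until one of the runs finishes (or the timeout elapses).
         if runs:
             runs[0].wait_for_complete(timeout=3600)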



.. py:class:: RefreshPolicy(client, refreshPolicyId=None, name=None, cron=None, nextRunTime=None, createdAt=None, refreshType=None, projectId=None, datasetIds=None, featureGroupId=None, modelIds=None, deploymentIds=None, batchPredictionIds=None, modelMonitorIds=None, notebookId=None, paused=None, predictionOperatorId=None, pipelineId=None, featureGroupExportConfig={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Refresh Policy describes the frequency at which one or more datasets/models/deployments/batch_predictions can be updated.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param refreshPolicyId: The unique identifier for the refresh policy
   :type refreshPolicyId: str
   :param name: The user-friendly name for the refresh policy
   :type name: str
   :param cron: A cron-style string that describes when this refresh policy is to be executed in UTC
   :type cron: str
   :param nextRunTime: The next UTC time that this refresh policy will be executed
   :type nextRunTime: str
   :param createdAt: The time when the refresh policy was created
   :type createdAt: str
   :param refreshType: The type of refresh policy to be run
   :type refreshType: str
   :param projectId: The unique identifier of a project that this refresh policy applies to
   :type projectId: str
   :param datasetIds: Comma-separated list of Dataset IDs that this refresh policy applies to
   :type datasetIds: list[str]
   :param featureGroupId: Feature Group ID that this refresh policy applies to
   :type featureGroupId: str
   :param modelIds: Comma-separated list of Model IDs that this refresh policy applies to
   :type modelIds: list[str]
   :param deploymentIds: Comma-separated list of Deployment IDs that this refresh policy applies to
   :type deploymentIds: list[str]
   :param batchPredictionIds: Comma-separated list of Batch Prediction IDs that this refresh policy applies to
   :type batchPredictionIds: list[str]
   :param modelMonitorIds: Comma-separated list of Model Monitor IDs that this refresh policy applies to
   :type modelMonitorIds: list[str]
   :param notebookId: Notebook ID that this refresh policy applies to
   :type notebookId: str
   :param paused: True if the refresh policy is paused
   :type paused: bool
   :param predictionOperatorId: Prediction Operator ID that this refresh policy applies to
   :type predictionOperatorId: str
   :param pipelineId: The ID of the pipeline with the cron schedule
   :type pipelineId: str
   :param featureGroupExportConfig: The export configuration for the feature group. Only applicable if refresh_type is FEATUREGROUP.
   :type featureGroupExportConfig: FeatureGroupRefreshExportConfig


   .. py:attribute:: refresh_policy_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: cron
      :value: None



   .. py:attribute:: next_run_time
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: refresh_type
      :value: None



   .. py:attribute:: project_id
      :value: None



   .. py:attribute:: dataset_ids
      :value: None



   .. py:attribute:: feature_group_id
      :value: None



   .. py:attribute:: model_ids
      :value: None



   .. py:attribute:: deployment_ids
      :value: None



   .. py:attribute:: batch_prediction_ids
      :value: None



   .. py:attribute:: model_monitor_ids
      :value: None



   .. py:attribute:: notebook_id
      :value: None



   .. py:attribute:: paused
      :value: None



   .. py:attribute:: prediction_operator_id
      :value: None



   .. py:attribute:: pipeline_id
      :value: None



   .. py:attribute:: feature_group_export_config


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: delete()

      Delete a refresh policy.

      :param refresh_policy_id: Unique string identifier associated with the refresh policy to delete.
      :type refresh_policy_id: str



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: RefreshPolicy



   .. py:method:: describe()

      Retrieve a single refresh policy

      :param refresh_policy_id: The unique ID associated with this refresh policy.
      :type refresh_policy_id: str

      :returns: An object representing the refresh policy.
      :rtype: RefreshPolicy



   .. py:method:: list_refresh_pipeline_runs()

      List the times that the refresh policy has been run

      :param refresh_policy_id: Unique identifier associated with the refresh policy.
      :type refresh_policy_id: str

      :returns: List of refresh pipeline runs for the given refresh policy ID.
      :rtype: list[RefreshPipelineRun]



   .. py:method:: pause()

      Pauses a refresh policy

      :param refresh_policy_id: Unique identifier associated with the refresh policy to be paused.
      :type refresh_policy_id: str



   .. py:method:: resume()

      Resumes a refresh policy

      :param refresh_policy_id: The unique ID associated with this refresh policy.
      :type refresh_policy_id: str



   .. py:method:: run()

      Force a run of the refresh policy.

      :param refresh_policy_id: Unique string identifier associated with the refresh policy to be run.
      :type refresh_policy_id: str



   .. py:method:: update(name = None, cron = None, feature_group_export_config = None)

      Update the name or cron string of a refresh policy

      :param name: Name of the refresh policy to be updated.
      :type name: str
      :param cron: Cron string describing the schedule for the refresh policy to be updated.
      :type cron: str
      :param feature_group_export_config: Feature group export configuration to update a feature group refresh policy.
      :type feature_group_export_config: FeatureGroupExportConfig

      :returns: Updated refresh policy.
      :rtype: RefreshPolicy
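
      A minimal sketch combining ``update``, ``pause``, and ``resume``, assuming the policy was retrieved with ``client.describe_refresh_policy`` (the ID is a placeholder):

      .. code-block:: python

         from abacusai import ApiClient

         client = ApiClient('YOUR_API_KEY')  # placeholder API key
         policy = client.describe_refresh_policy('refresh_policy_id')  # placeholder ID

         # Rename the policy and move it to a weekly schedule (Mondays at 06:00 UTC).
         policy = policy.update(name='Weekly retrain', cron='0 6 * * 1')

         policy.pause()
         policy.resume()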



.. py:class:: RefreshSchedule(client, refreshPolicyId=None, nextRunTime=None, cron=None, refreshType=None, error=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A refresh schedule for an object. Defines when the next version of the object will be created

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param refreshPolicyId: The unique identifier of the refresh policy
   :type refreshPolicyId: str
   :param nextRunTime: The next run time of the refresh policy. If null, the policy is paused.
   :type nextRunTime: str
   :param cron: A cron-style string that describes when this refresh policy is to be executed in UTC
   :type cron: str
   :param refreshType: The type of refresh that will be run
   :type refreshType: str
   :param error: An error message for the last pipeline run of a policy
   :type error: str


   .. py:attribute:: refresh_policy_id
      :value: None



   .. py:attribute:: next_run_time
      :value: None



   .. py:attribute:: cron
      :value: None



   .. py:attribute:: refresh_type
      :value: None



   .. py:attribute:: error
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: RegenerateLlmExternalApplication(client, name=None, externalApplicationId=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An external application that specifies an LLM that a user can regenerate with in RouteLLM.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The external name of the LLM.
   :type name: str
   :param externalApplicationId: The unique identifier of the external application.
   :type externalApplicationId: str


   .. py:attribute:: name
      :value: None



   .. py:attribute:: external_application_id
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ResolvedFeatureGroupTemplate(client, featureGroupTemplateId=None, resolvedVariables=None, resolvedSql=None, templateSql=None, sqlError=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Final SQL from resolving a feature group template.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param featureGroupTemplateId: Unique identifier for this feature group template.
   :type featureGroupTemplateId: str
   :param resolvedVariables: Map from template variable names to parameters available during template resolution.
   :type resolvedVariables: dict
   :param resolvedSql: SQL resulting from resolving the SQL template by applying the resolved bindings.
   :type resolvedSql: str
   :param templateSql: SQL that can include variables to be replaced by values from the template config to resolve this template SQL into a valid SQL query for a feature group.
   :type templateSql: str
   :param sqlError: If the resolved SQL is invalid, the SQL error message.
   :type sqlError: str


   .. py:attribute:: feature_group_template_id
      :value: None



   .. py:attribute:: resolved_variables
      :value: None



   .. py:attribute:: resolved_sql
      :value: None



   .. py:attribute:: template_sql
      :value: None



   .. py:attribute:: sql_error
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: RoutingAction(client, id=None, title=None, prompt=None, placeholder=None, value=None, displayName=None, isLarge=None, isMedium=None, additionalInfo=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Routing action

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param id: The id of the routing action.
   :type id: str
   :param title: The title of the routing action.
   :type title: str
   :param prompt: The prompt of the routing action.
   :type prompt: str
   :param placeholder: The placeholder of the routing action.
   :type placeholder: str
   :param value: The value of the routing action.
   :type value: str
   :param displayName: The display name of the routing action.
   :type displayName: str
   :param isLarge: UI placement
   :type isLarge: bool
   :param isMedium: UI placement
   :type isMedium: bool
   :param additionalInfo: Additional information for the routing action.
   :type additionalInfo: dict


   .. py:attribute:: id
      :value: None



   .. py:attribute:: title
      :value: None



   .. py:attribute:: prompt
      :value: None



   .. py:attribute:: placeholder
      :value: None



   .. py:attribute:: value
      :value: None



   .. py:attribute:: display_name
      :value: None



   .. py:attribute:: is_large
      :value: None



   .. py:attribute:: is_medium
      :value: None



   .. py:attribute:: additional_info
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Schema(client, name=None, featureMapping=None, detectedFeatureMapping=None, featureType=None, detectedFeatureType=None, dataType=None, detectedDataType=None, nestedFeatures={}, pointInTimeInfo={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A schema description for a feature

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The unique name of the feature.
   :type name: str
   :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the `Use Case Documentation <https://api.abacus.ai/app/help/useCases>`_ for more details.
   :type featureMapping: str
   :param detectedFeatureMapping: Detected feature mapping for this feature
   :type detectedFeatureMapping: str
   :param featureType: The feature type of the feature:  CATEGORICAL,  CATEGORICAL_LIST,  NUMERICAL,  TIMESTAMP,  TEXT,  EMAIL,  LABEL_LIST,  ENTITY_LABEL_LIST,  PAGE_LABEL_LIST,  JSON,  OBJECT_REFERENCE,  MULTICATEGORICAL_LIST,  COORDINATE_LIST,  NUMERICAL_LIST,  TIMESTAMP_LIST,  ZIPCODE,  URL,  PAGE_INFOS,  PAGES_DOCUMENT,  TOKENS_DOCUMENT,  MESSAGE_LIST.
   :type featureType: str
   :param detectedFeatureType: The detected feature type for this feature
   :type detectedFeatureType: str
   :param dataType: The underlying data type of each feature:  INTEGER,  FLOAT,  STRING,  DATE,  DATETIME,  BOOLEAN,  LIST,  STRUCT,  NULL,  BINARY.
   :type dataType: str
   :param detectedDataType: The detected data type for this feature
   :type detectedDataType: str
   :param nestedFeatures: List of nested features for this feature
   :type nestedFeatures: NestedFeatureSchema
   :param pointInTimeInfo: Point in time information for this feature
   :type pointInTimeInfo: PointInTimeFeatureInfo


   .. py:attribute:: name
      :value: None



   .. py:attribute:: feature_mapping
      :value: None



   .. py:attribute:: detected_feature_mapping
      :value: None



   .. py:attribute:: feature_type
      :value: None



   .. py:attribute:: detected_feature_type
      :value: None



   .. py:attribute:: data_type
      :value: None



   .. py:attribute:: detected_data_type
      :value: None



   .. py:attribute:: nested_features


   .. py:attribute:: point_in_time_info


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
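
   A minimal sketch of inspecting a resolved schema, assuming ``schema`` is a ``Schema``
   instance already retrieved for a feature group column:

   .. code-block:: python

      # Compare the configured mapping and types against what was auto-detected.
      print(schema.name, schema.feature_mapping, schema.detected_feature_mapping)
      print(schema.feature_type, schema.data_type)

      # The full payload is also available as a plain dictionary.
      info = schema.to_dict()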



.. py:class:: SftpKey(client, keyName=None, publicKey=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An SFTP key

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param keyName: The name of the key
   :type keyName: str
   :param publicKey: The public key
   :type publicKey: str


   .. py:attribute:: key_name
      :value: None



   .. py:attribute:: public_key
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: StreamingAuthToken(client, streamingToken=None, createdAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A streaming authentication token that is used to authenticate requests to append data to streaming datasets

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param streamingToken: The unique token used to authenticate requests
   :type streamingToken: str
   :param createdAt: When the token was created
   :type createdAt: str


   .. py:attribute:: streaming_token
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: StreamingClient(client_options = None)

   Bases: :py:obj:`abacusai.client.BaseApiClient`


   Abacus.AI Streaming API Client. Does not utilize authentication and only contains public streaming methods

   :param client_options: Optional API client configurations
   :type client_options: ClientOptions


   .. py:method:: upsert_item_embeddings(streaming_token, model_id, item_id, vector, catalog_id = None)

      Upserts an embedding vector for an item ID in the given model.

      :param streaming_token: The streaming token for authenticating requests to the model.
      :type streaming_token: str
      :param model_id: A unique string identifier for the model to upsert item embeddings to.
      :type model_id: str
      :param item_id: The item id for which its embeddings will be upserted.
      :type item_id: str
      :param vector: The embedding vector.
      :type vector: list
      :param catalog_id: The name of the catalog in the model to update.
      :type catalog_id: str



   .. py:method:: delete_item_embeddings(streaming_token, model_id, item_ids, catalog_id = None)

      Deletes KNN embeddings for a list of item IDs for a given model ID.

      :param streaming_token: The streaming token for authenticating requests to the model.
      :type streaming_token: str
      :param model_id: A unique string identifier for the model from which to delete item embeddings.
      :type model_id: str
      :param item_ids: A list of item IDs whose embeddings will be deleted.
      :type item_ids: list
      :param catalog_id: An optional name to specify which catalog in a model to update.
      :type catalog_id: str



   .. py:method:: upsert_multiple_item_embeddings(streaming_token, model_id, upserts, catalog_id = None)

      Upserts KNN embeddings for multiple item IDs in the given model.

      :param streaming_token: The streaming token for authenticating requests to the model.
      :type streaming_token: str
      :param model_id: The unique string identifier of the model to upsert item embeddings to.
      :type model_id: str
      :param upserts: A list of dictionaries of the form {'itemId': ..., 'vector': [...]} for each upsert.
      :type upserts: list
      :param catalog_id: Name of the catalog in the model to update.
      :type catalog_id: str



   .. py:method:: append_data(feature_group_id, streaming_token, data)

      Appends new data into the feature group for a given lookup key recordId.

      :param feature_group_id: Unique string identifier for the streaming feature group to record data to.
      :type feature_group_id: str
      :param streaming_token: The streaming token for authenticating requests.
      :type streaming_token: str
      :param data: The data to record as a JSON object.
      :type data: dict



   .. py:method:: append_multiple_data(feature_group_id, streaming_token, data)

      Appends multiple rows of new data into the feature group.

      :param feature_group_id: Unique string identifier of the streaming feature group to record data to.
      :type feature_group_id: str
      :param streaming_token: Streaming token for authenticating requests.
      :type streaming_token: str
      :param data: Data to record, as a list of JSON objects.
      :type data: list
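
   A minimal sketch of streaming rows into a feature group with the unauthenticated
   streaming client; the feature group ID, streaming token, and record fields below are
   placeholders:

   .. code-block:: python

      from abacusai import StreamingClient

      streaming_client = StreamingClient()

      # Append a single JSON record to a streaming feature group.
      streaming_client.append_data(
          feature_group_id='FEATURE_GROUP_ID',
          streaming_token='STREAMING_TOKEN',
          data={'user_id': 'u_123', 'event': 'click'},
      )

      # Or append several records in one call.
      streaming_client.append_multiple_data(
          feature_group_id='FEATURE_GROUP_ID',
          streaming_token='STREAMING_TOKEN',
          data=[{'user_id': 'u_123', 'event': 'view'},
                {'user_id': 'u_456', 'event': 'click'}],
      )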



.. py:class:: StreamingConnector(client, streamingConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A connector to an external service

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param streamingConnectorId: The unique ID for the connection.
   :type streamingConnectorId: str
   :param service: The service this connection connects to
   :type service: str
   :param name: A user-friendly name for the service
   :type name: str
   :param createdAt: When the streaming connector was created
   :type createdAt: str
   :param status: The status of the streaming connector
   :type status: str
   :param auth: Non-secret connection information for this connector
   :type auth: dict


   .. py:attribute:: streaming_connector_id
      :value: None



   .. py:attribute:: service
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: auth
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: verify()

      Checks to see if Abacus.AI can access the streaming connector.

      :param streaming_connector_id: Unique string identifier for the streaming connector to be checked for Abacus.AI access.
      :type streaming_connector_id: str



   .. py:method:: rename(name)

      Renames a Streaming Connector

      :param name: A new name for the streaming connector.
      :type name: str



   .. py:method:: delete()

      Delete a streaming connector.

      :param streaming_connector_id: The unique identifier for the streaming connector.
      :type streaming_connector_id: str
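
   A minimal sketch of managing a streaming connector, assuming ``connector`` is a
   ``StreamingConnector`` instance obtained from an authenticated client (for example,
   from a listing call):

   .. code-block:: python

      # Check that Abacus.AI can still access the external service.
      connector.verify()

      # Give the connector a friendlier name.
      connector.rename('clickstream-events')

      # Remove the connector once it is no longer needed.
      connector.delete()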



.. py:class:: StreamingRowCount(client, count=None, startTsMs=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   The number of rows in a streaming feature group from the specified start time

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param count: The number of rows in the feature group
   :type count: int
   :param startTsMs: The start time for the number of rows.
   :type startTsMs: int


   .. py:attribute:: count
      :value: None



   .. py:attribute:: start_ts_ms
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: StreamingSampleCode(client, python=None, curl=None, console=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Sample code for adding data to a streaming feature group, with examples for different interfaces (Python, curl, console).

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param python: The python code sample.
   :type python: str
   :param curl: The curl code sample.
   :type curl: str
   :param console: The console code sample
   :type console: str


   .. py:attribute:: python
      :value: None



   .. py:attribute:: curl
      :value: None



   .. py:attribute:: console
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: TemplateNodeDetails(client, notebookCode=None, workflowGraphNode={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Details about a WorkflowGraphNode object and the notebook code for adding template nodes to a workflow.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param notebookCode: The boilerplate code to be shown in a notebook for creating a workflow graph node from the corresponding template.
   :type notebookCode: list
   :param workflowGraphNode: The workflow graph node object corresponding to the template.
   :type workflowGraphNode: WorkflowGraphNode


   .. py:attribute:: notebook_code
      :value: None



   .. py:attribute:: workflow_graph_node


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: TestPointPredictions(client, count=None, columns=None, data=None, metricsColumns=None, summarizedMetrics=None, errorDescription=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Test Point Predictions

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param count: Count of total rows in the preview data for the SQL.
   :type count: int
   :param columns: The returned columns
   :type columns: list
   :param data: A list of data rows, each represented as a list.
   :type data: list
   :param metricsColumns: The columns that are the metrics.
   :type metricsColumns: list
   :param summarizedMetrics: A map between the problem type metrics and the mean of the results matching the query
   :type summarizedMetrics: dict
   :param errorDescription: Description of an error in case of failure.
   :type errorDescription: str


   .. py:attribute:: count
      :value: None



   .. py:attribute:: columns
      :value: None



   .. py:attribute:: data
      :value: None



   .. py:attribute:: metrics_columns
      :value: None



   .. py:attribute:: summarized_metrics
      :value: None



   .. py:attribute:: error_description
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: ToneDetails(client, voiceId=None, name=None, gender=None, language=None, age=None, accent=None, useCase=None, description=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Tone details for audio

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param voiceId: The voice ID
   :type voiceId: str
   :param name: The name of the voice
   :type name: str
   :param gender: The gender of the voice
   :type gender: str
   :param language: The language of the voice
   :type language: str
   :param age: The age of the voice
   :type age: str
   :param accent: The accent of the voice
   :type accent: str
   :param useCase: The intended use case for the voice
   :type useCase: str
   :param description: A description of the voice
   :type description: str


   .. py:attribute:: voice_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: gender
      :value: None



   .. py:attribute:: language
      :value: None



   .. py:attribute:: age
      :value: None



   .. py:attribute:: accent
      :value: None



   .. py:attribute:: use_case
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: TrainingConfigOptions(client, name=None, dataType=None, valueType=None, valueOptions=None, value=None, default=None, options=None, description=None, required=None, lastModelValue=None, needsRefresh=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Training options for a model

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The name of the parameter
   :type name: str
   :param dataType: The type of input required for this option
   :type dataType: str
   :param valueType: If the data_type is of type DICT_VALUES, this field specifies the expected value type of the values
   :type valueType: str
   :param valueOptions: The list of valid values for DICT_VALUES
   :type valueOptions: list[str]
   :param value: The value of this option
   :type value: optional[any]
   :param default: The default value for this option
   :type default: optional[any]
   :param options: A dict of options for this parameter
   :type options: dict
   :param description: A description of the parameter
   :type description: str
   :param required: True if the parameter is required for training
   :type required: bool
   :param lastModelValue: The last value used to train a model in this project
   :type lastModelValue: optional[str, int, float, bool]
   :param needsRefresh: True if training config needs to be fetched again when this config option is changed
   :type needsRefresh: bool


   .. py:attribute:: name
      :value: None



   .. py:attribute:: data_type
      :value: None



   .. py:attribute:: value_type
      :value: None



   .. py:attribute:: value_options
      :value: None



   .. py:attribute:: value
      :value: None



   .. py:attribute:: default
      :value: None



   .. py:attribute:: options
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: required
      :value: None



   .. py:attribute:: last_model_value
      :value: None



   .. py:attribute:: needs_refresh
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
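
   A minimal sketch of working with training options, assuming ``options`` is a list of
   ``TrainingConfigOptions`` returned by the client for a project (the retrieval call is
   not shown):

   .. code-block:: python

      # Collect required options, preferring the current value over the default.
      training_config = {}
      for option in options:
          if option.required:
              training_config[option.name] = option.value if option.value is not None else option.default

      # Options flagged with needs_refresh require re-fetching the config when changed.
      refresh_sensitive = [o.name for o in options if o.needs_refresh]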



.. py:class:: TwitterSearchResult(client, title=None, url=None, twitterName=None, twitterHandle=None, thumbnailUrl=None, thumbnailWidth=None, thumbnailHeight=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A single Twitter search result.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param title: The title of the tweet.
   :type title: str
   :param url: The URL of the tweet.
   :type url: str
   :param twitterName: The name of the Twitter user.
   :type twitterName: str
   :param twitterHandle: The handle of the Twitter user.
   :type twitterHandle: str
   :param thumbnailUrl: The URL of the thumbnail of the tweet.
   :type thumbnailUrl: str
   :param thumbnailWidth: The width of the thumbnail of the tweet.
   :type thumbnailWidth: int
   :param thumbnailHeight: The height of the thumbnail of the tweet.
   :type thumbnailHeight: int


   .. py:attribute:: title
      :value: None



   .. py:attribute:: url
      :value: None



   .. py:attribute:: twitter_name
      :value: None



   .. py:attribute:: twitter_handle
      :value: None



   .. py:attribute:: thumbnail_url
      :value: None



   .. py:attribute:: thumbnail_width
      :value: None



   .. py:attribute:: thumbnail_height
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: Upload(client, uploadId=None, datasetUploadId=None, status=None, datasetId=None, datasetVersion=None, modelId=None, modelVersion=None, batchPredictionId=None, parts=None, createdAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An upload reference for uploading file parts

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param uploadId: The unique ID generated when the upload process of the full large file in smaller parts is initiated.
   :type uploadId: str
   :param datasetUploadId: Same as upload_id. It is kept for backwards compatibility purposes.
   :type datasetUploadId: str
   :param status: The current status of the upload.
   :type status: str
   :param datasetId: A reference to the dataset this upload is adding data to.
   :type datasetId: str
   :param datasetVersion: A reference to the dataset version the upload is adding data to.
   :type datasetVersion: str
   :param modelId: A reference to the model the upload is creating a version for
   :type modelId: str
   :param modelVersion: A reference to the model version the upload is creating.
   :type modelVersion: str
   :param batchPredictionId: A reference to the batch prediction the upload is creating.
   :type batchPredictionId: str
   :param parts: A list containing the order of the file parts that have been uploaded.
   :type parts: list[dict]
   :param createdAt: The timestamp at which the upload was created.
   :type createdAt: str


   .. py:attribute:: upload_id
      :value: None



   .. py:attribute:: dataset_upload_id
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: dataset_id
      :value: None



   .. py:attribute:: dataset_version
      :value: None



   .. py:attribute:: model_id
      :value: None



   .. py:attribute:: model_version
      :value: None



   .. py:attribute:: batch_prediction_id
      :value: None



   .. py:attribute:: parts
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: cancel()

      Cancels an upload.

      :param upload_id: A unique string identifier for the upload.
      :type upload_id: str



   .. py:method:: part(part_number, part_data)

      Uploads part of a large dataset file from your bucket to our system. Our system currently supports parts of up to 5GB and full files of up to 5TB. Note that each part must be at least 5MB in size, unless it is the last part in the sequence of parts for the full file.

      :param part_number: The 1-indexed number denoting the position of the file part in the sequence of parts for the full file.
      :type part_number: int
      :param part_data: The multipart/form-data for the current part of the full file.
      :type part_data: io.TextIOBase

      :returns: The object 'UploadPart' which encapsulates the hash and the etag for the part that got uploaded.
      :rtype: UploadPart



   .. py:method:: mark_complete()

      Marks an upload process as complete.

      :param upload_id: A unique string identifier for the upload process.
      :type upload_id: str

      :returns: The upload object associated with the process, containing details of the file.
      :rtype: Upload



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Upload



   .. py:method:: describe()

      Retrieves the current upload status (complete or inspecting) and the list of file parts uploaded for a specified dataset upload.

      :param upload_id: The unique ID associated with the file uploaded or being uploaded in parts.
      :type upload_id: str

      :returns: Details associated with the large dataset file uploaded in parts.
      :rtype: Upload



   .. py:method:: upload_part(upload_args)

      Uploads a file part.

      :returns: The object 'UploadPart' that encapsulates the hash and the etag for the part that got uploaded.
      :rtype: UploadPart



   .. py:method:: upload_file(file, threads=10, chunksize=1024 * 1024 * 10, wait_timeout=600)

      Uploads the file in the specified chunk size using the specified number of workers.

      :param file: A BytesIO or StringIO object to upload to Abacus.AI
      :type file: IOBase
      :param threads: The max number of workers to use while uploading the file
      :type threads: int
      :param chunksize: The number of bytes to use for each chunk while uploading the file. Defaults to 10 MB
      :type chunksize: int
      :param wait_timeout: The max number of seconds to wait for the file parts to be joined on Abacus.AI. Defaults to 600.
      :type wait_timeout: int

      :returns: The upload file object.
      :rtype: Upload



   .. py:method:: _yield_upload_part(file, chunksize)


   .. py:method:: wait_for_join(timeout=600)

      A waiting call until the upload parts are joined.

      :param timeout: The maximum time in seconds to wait for the parts to be joined; if they are not joined within this time, the call times out. Defaults to 600.
      :type timeout: int



   .. py:method:: get_status()

      Gets the status of the upload.

      :returns: A string describing the status of the upload (pending, complete, etc.).
      :rtype: str
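
   A minimal sketch of a multipart upload, assuming ``upload`` is an ``Upload`` instance
   returned by a dataset- or model-creation call on an authenticated client:

   .. code-block:: python

      with open('transactions.csv', 'rb') as f:
          # Splits the file into 10 MB chunks, uploads them with 10 workers,
          # and waits up to 600 seconds for the parts to be joined.
          upload = upload.upload_file(f, threads=10, chunksize=1024 * 1024 * 10, wait_timeout=600)

      print(upload.get_status())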



.. py:class:: UploadPart(client, etag=None, md5=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Unique identifiers for a part

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param etag: A unique string for this part.
   :type etag: str
   :param md5: The MD5 hash of this part.
   :type md5: str


   .. py:attribute:: etag
      :value: None



   .. py:attribute:: md5
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: UseCase(client, useCase=None, prettyName=None, description=None, problemType=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A Project Use Case

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param useCase: The enum value for this use case
   :type useCase: str
   :param prettyName: A user-friendly name
   :type prettyName: str
   :param description: A description for this use case
   :type description: str
   :param problemType: Name for the underlying problem type
   :type problemType: str


   .. py:attribute:: use_case
      :value: None



   .. py:attribute:: pretty_name
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: problem_type
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: UseCaseRequirements(client, datasetType=None, name=None, description=None, required=None, multi=None, allowedFeatureMappings=None, allowedNestedFeatureMappings=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Use Case Requirements

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param datasetType: The project-specific enum value of the dataset type.
   :type datasetType: str
   :param name: The user-friendly name of the dataset type.
   :type name: str
   :param description: The description of the dataset type.
   :type description: str
   :param required: True if the dataset type is required for this project.
   :type required: bool
   :param multi: If true, multiple versions of the dataset type can be used for training.
   :type multi: bool
   :param allowedFeatureMappings: A collection of key-value pairs, with each key being a column mapping enum (see a list of column mapping enums here) and each value being in the following dictionary format: { "description": str, "allowed_feature_types": feature_type_enum, "required": bool }.
   :type allowedFeatureMappings: dict
   :param allowedNestedFeatureMappings: A collection of key-value pairs, with each key being a column mapping enum (see a list of column mapping enums here) and each value being in the following dictionary format: { "description": str, "allowed_feature_types": feature_type_enum, "required": bool }.
   :type allowedNestedFeatureMappings: dict


   .. py:attribute:: dataset_type
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: required
      :value: None



   .. py:attribute:: multi
      :value: None



   .. py:attribute:: allowed_feature_mappings
      :value: None



   .. py:attribute:: allowed_nested_feature_mappings
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
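
   A minimal sketch of reading dataset requirements, assuming ``requirements`` is a list
   of ``UseCaseRequirements`` returned by the client for a use case:

   .. code-block:: python

      for req in requirements:
          needed = 'required' if req.required else 'optional'
          print(f'{req.dataset_type} ({needed}): {req.description}')
          # allowed_feature_mappings maps each column mapping enum to its constraints.
          for mapping, constraints in (req.allowed_feature_mappings or {}).items():
              print(' ', mapping, constraints.get('allowed_feature_types'))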



.. py:class:: User(client, name=None, email=None, createdAt=None, status=None, organizationGroups={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An Abacus.AI User

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param name: The User's name.
   :type name: str
   :param email: The User's primary email address.
   :type email: str
   :param createdAt: The date and time when the user joined Abacus.AI.
   :type createdAt: str
   :param status: `ACTIVE` when the user has accepted an invite to join the organization, else `INVITED`.
   :type status: str
   :param organizationGroups: List of Organization Groups this user belongs to.
   :type organizationGroups: OrganizationGroup


   .. py:attribute:: name
      :value: None



   .. py:attribute:: email
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: status
      :value: None



   .. py:attribute:: organization_groups


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: UserException(client, type=None, value=None, traceback=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Exception information for errors in usercode.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param type: The type of exception
   :type type: str
   :param value: The value of the exception
   :type value: str
   :param traceback: The traceback of the exception
   :type traceback: str


   .. py:attribute:: type
      :value: None



   .. py:attribute:: value
      :value: None



   .. py:attribute:: traceback
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: VideoGenSettings(client, model=None, settings=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Video generation settings

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param model: The model settings.
   :type model: dict
   :param settings: The settings for each model.
   :type settings: dict


   .. py:attribute:: model
      :value: None



   .. py:attribute:: settings
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: VideoSearchResult(client, title=None, url=None, thumbnailUrl=None, motionThumbnailUrl=None, embedUrl=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A single video search result.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param title: The title of the video.
   :type title: str
   :param url: The URL of the video.
   :type url: str
   :param thumbnailUrl: The URL of the thumbnail of the video.
   :type thumbnailUrl: str
   :param motionThumbnailUrl: The URL of the motion thumbnail of the video.
   :type motionThumbnailUrl: str
   :param embedUrl: The URL of the embed of the video.
   :type embedUrl: str


   .. py:attribute:: title
      :value: None



   .. py:attribute:: url
      :value: None



   .. py:attribute:: thumbnail_url
      :value: None



   .. py:attribute:: motion_thumbnail_url
      :value: None



   .. py:attribute:: embed_url
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: VoiceGenDetails(client, model=None, voice=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Voice generation details

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param model: The model used for voice generation.
   :type model: str
   :param voice: The voice details.
   :type voice: dict


   .. py:attribute:: model
      :value: None



   .. py:attribute:: voice
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: WebPageResponse(client, content=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A scraped web page response

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param content: The content of the web page.
   :type content: str


   .. py:attribute:: content
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: WebSearchResponse(client, searchResults={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Result of running a web search with optional content fetching.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param searchResults: List of search results.
   :type searchResults: WebSearchResult


   .. py:attribute:: search_results


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: WebSearchResult(client, title=None, url=None, snippet=None, news=None, place=None, entity=None, content=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A single search result.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param title: The title of the search result.
   :type title: str
   :param url: The URL of the search result.
   :type url: str
   :param snippet: The snippet of the search result.
   :type snippet: str
   :param news: The news search result (if any)
   :type news: str
   :param place: The place search result (if any)
   :type place: str
   :param entity: The entity search result (if any)
   :type entity: str
   :param content: The page of content fetched from the url.
   :type content: str


   .. py:attribute:: title
      :value: None



   .. py:attribute:: url
      :value: None



   .. py:attribute:: snippet
      :value: None



   .. py:attribute:: news
      :value: None



   .. py:attribute:: place
      :value: None



   .. py:attribute:: entity
      :value: None



   .. py:attribute:: content
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict
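
   A minimal sketch of consuming search results, assuming ``response`` is a
   ``WebSearchResponse`` whose ``search_results`` holds ``WebSearchResult`` objects:

   .. code-block:: python

      for result in response.search_results:
          print(result.title, result.url)
          print(result.snippet)
          if result.content:
              # content is only populated when content fetching was requested.
              print(result.content[:200])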



.. py:class:: Webhook(client, webhookId=None, deploymentId=None, endpoint=None, webhookEventType=None, payloadTemplate=None, createdAt=None)

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   An Abacus.AI webhook attached to an endpoint and event trigger for a given object.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param webhookId: Unique identifier for this webhook.
   :type webhookId: str
   :param deploymentId: Identifier for the deployment this webhook is attached to.
   :type deploymentId: str
   :param endpoint: The URI this webhook will send HTTP POST requests to.
   :type endpoint: str
   :param webhookEventType: The event that triggers the webhook action.
   :type webhookEventType: str
   :param payloadTemplate: Template for JSON Dictionary to be sent as the body of the POST request.
   :type payloadTemplate: str
   :param createdAt: The date and time this webhook was created.
   :type createdAt: str


   .. py:attribute:: webhook_id
      :value: None



   .. py:attribute:: deployment_id
      :value: None



   .. py:attribute:: endpoint
      :value: None



   .. py:attribute:: webhook_event_type
      :value: None



   .. py:attribute:: payload_template
      :value: None



   .. py:attribute:: created_at
      :value: None



   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



   .. py:method:: refresh()

      Calls describe and refreshes the current object's fields

      :returns: The current object
      :rtype: Webhook



   .. py:method:: describe()

      Describe the webhook with a given ID.

      :param webhook_id: Unique string identifier of the target webhook.
      :type webhook_id: str

      :returns: The webhook with the given ID.
      :rtype: Webhook



   .. py:method:: update(endpoint = None, webhook_event_type = None, payload_template = None)

      Update the webhook

      :param endpoint: If provided, changes the webhook's endpoint.
      :type endpoint: str
      :param webhook_event_type: If provided, changes the event type.
      :type webhook_event_type: str
      :param payload_template: If provided, changes the payload template.
      :type payload_template: dict



   .. py:method:: delete()

      Delete the webhook

      :param webhook_id: Unique identifier of the target webhook.
      :type webhook_id: str
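
   A minimal sketch of maintaining a webhook, assuming ``webhook`` is a ``Webhook``
   instance obtained from an authenticated client (for example via a describe call):

   .. code-block:: python

      # Point the webhook at a new endpoint and refresh the local object.
      webhook.update(endpoint='https://example.com/abacus-events')
      webhook = webhook.refresh()
      print(webhook.endpoint, webhook.webhook_event_type)

      # Remove the webhook when it is no longer needed.
      webhook.delete()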



.. py:class:: WorkflowGraphNodeDetails(client, packageRequirements=None, connectors=None, workflowGraphNode={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   Details about a workflow graph node, including its package requirements and connectors.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param packageRequirements: A list of package requirements that the node source code will need.
   :type packageRequirements: list[str]
   :param connectors: A dictionary of connectors that the node source code will need.
   :type connectors: dict
   :param workflowGraphNode: The workflow graph node object.
   :type workflowGraphNode: WorkflowGraphNode


   .. py:attribute:: package_requirements
      :value: None



   .. py:attribute:: connectors
      :value: None



   .. py:attribute:: workflow_graph_node


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:class:: WorkflowNodeTemplate(client, workflowNodeTemplateId=None, name=None, functionName=None, sourceCode=None, description=None, packageRequirements=None, tags=None, additionalConfigs=None, inputs={}, outputs={}, templateConfigs={})

   Bases: :py:obj:`abacusai.return_class.AbstractApiClass`


   A workflow node template.

   :param client: An authenticated API Client instance
   :type client: ApiClient
   :param workflowNodeTemplateId: The unique identifier of the workflow node template.
   :type workflowNodeTemplateId: str
   :param name: The name of the workflow node template.
   :type name: str
   :param functionName: The function name of the workflow node function.
   :type functionName: str
   :param sourceCode: The source code of the function that the workflow node template will execute.
   :type sourceCode: str
   :param description: A description of the workflow node template.
   :type description: str
   :param packageRequirements: A list of package requirements that the node source code may need.
   :type packageRequirements: list[str]
   :param tags: Tags to add to the workflow node template. They contain information on the intended usage of the template.
   :type tags: dict
   :param additionalConfigs: Additional configurations for the workflow node template.
   :type additionalConfigs: dict
   :param inputs: A list of inputs that the workflow node template will use.
   :type inputs: WorkflowNodeTemplateInput
   :param outputs: A list of outputs that the workflow node template will give.
   :type outputs: WorkflowNodeTemplateOutput
   :param templateConfigs: A list of template configs that are hydrated into the source code to produce the complete code.
   :type templateConfigs: WorkflowNodeTemplateConfig


   .. py:attribute:: workflow_node_template_id
      :value: None



   .. py:attribute:: name
      :value: None



   .. py:attribute:: function_name
      :value: None



   .. py:attribute:: source_code
      :value: None



   .. py:attribute:: description
      :value: None



   .. py:attribute:: package_requirements
      :value: None



   .. py:attribute:: tags
      :value: None



   .. py:attribute:: additional_configs
      :value: None



   .. py:attribute:: inputs


   .. py:attribute:: outputs


   .. py:attribute:: template_configs


   .. py:attribute:: deprecated_keys


   .. py:method:: __repr__()


   .. py:method:: to_dict()

      Get a dict representation of the parameters in this class

      :returns: The dict value representation of the class parameters
      :rtype: dict



.. py:data:: __version__
   :value: '1.4.37'