@@ -37,13 +37,14 @@ Here are a few examples of what you can access from the project object:
 import dlt
 
 # show the currently active profile
-print(dlt.hub.current.project.config().current_profile)
+# TODO: remove ignore when dlthub plugin releases
+print(dlt.hub.current.project.config().current_profile)  # type: ignore
 # show the main project dir
-print(dlt.hub.current.project.config().project_dir)
+print(dlt.hub.current.project.config().project_dir)  # type: ignore
 # show the project config dict
-print(dlt.hub.current.project.project().config)
+print(dlt.hub.current.project.project().config)  # type: ignore
 # list explicitly defined datasets (also works with destinations, sources, pipelines, etc.)
-print(dlt.hub.current.project.project().datasets)
+print(dlt.hub.current.project.project().datasets)  # type: ignore
 ```
 ## Accessing entities
 
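The listing call above is not limited to datasets. A minimal sketch, assuming the project object exposes sibling collections named `destinations` and `pipelines` (only `datasets` is confirmed on this page, so treat the other attribute names as illustrative):

```py
import dlt

# the project object, as obtained above
project = dlt.hub.current.project.project()  # type: ignore
# confirmed above: explicitly defined datasets
print(project.datasets)
# assumed attribute names for the other entity collections
print(project.destinations)
print(project.pipelines)
```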
@@ -52,7 +53,7 @@ If allowed, implicit entities will be created and returned automatically. If not
 ```py
 import dlt
 
-entities = dlt.hub.current.project.entities()
+entities = dlt.hub.current.project.entities()  # type: ignore
 pipeline = entities.get_pipeline("my_pipeline")
 destination = entities.get_destination("duckdb")
 transformation = entities.get_transformation("stressed_transformation")
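The handles returned here should behave like their regular dlt counterparts. A short usage sketch, assuming `get_pipeline` returns a standard `dlt.Pipeline` (the return types are not shown in this hunk):

```py
import dlt

entities = dlt.hub.current.project.entities()  # type: ignore
pipeline = entities.get_pipeline("my_pipeline")
# run a small in-memory payload, as with any dlt pipeline
pipeline.run([{"id": 1}, {"id": 2}], table_name="demo_items")
```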
@@ -69,7 +70,7 @@ You can also use it directly in your code through the project context:
 import dlt
 
 # get the runner
-runner = dlt.hub.current.project.runner()
+runner = dlt.hub.current.project.runner()  # type: ignore
 # run the "my_pipeline" pipeline from the currently active project
 runner.run_pipeline("my_pipeline")
 ```
@@ -83,7 +84,7 @@ import dlt
 
 # Get a dataset instance pointing to the default destination (first in dataset destinations list) and access data inside of it
 # Note: The dataset must already exist physically for this to work
-dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")
+dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")  # type: ignore
 # Get the row counts of all tables in the dataset as a dataframe
 print(dataset.row_counts().df())
 ```
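Beyond `row_counts()`, the dataset handle exposes its individual tables. A brief sketch, assuming a table named `my_table` already exists in the dataset (the table name is hypothetical; attribute access and `.df()` follow dlt's regular dataset access API):

```py
import dlt

dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")  # type: ignore
# read the "my_table" table into a pandas DataFrame
print(dataset.my_table.df().head())
```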
@@ -109,7 +110,7 @@ import pandas as pd
 import dlt
 
 # Get a dataset from the catalog (it must already exist and be defined in dlt.yml)
-dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")
+dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")  # type: ignore
 # Write a DataFrame to the "my_table" table in the dataset
 dataset.save(pd.DataFrame({"name": ["John", "Jane", "Jim"], "age": [30, 25, 35]}), table_name="my_table")
 ```
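To verify that the write landed, you can reuse the `row_counts()` call shown earlier; this follow-up sketch uses only calls already shown on this page:

```py
import pandas as pd
import dlt

dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")  # type: ignore
dataset.save(pd.DataFrame({"name": ["John"], "age": [30]}), table_name="my_table")
# "my_table" should now appear with a non-zero row count
print(dataset.row_counts().df())
```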
@@ -120,7 +121,7 @@ You can also read from an existing table and write the data to a new table, eith
 import dlt
 
 # Get dataset from the catalog
-dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")
+dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")  # type: ignore
 
 # This function reads data in chunks from an existing table and yields each chunk
 def transform_frames():
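The hunk ends right after the function header, so the body of `transform_frames` is not visible here. A minimal sketch of how such a chunked read-transform-write could look, assuming chunked reads via `iter_df` and that `save` accepts a generator of frames (both are assumptions where this page does not show them):

```py
import dlt

dataset = dlt.hub.current.project.catalog().dataset("my_pipeline_dataset")  # type: ignore

# read "my_table" in chunks, modify each frame, and yield it
def transform_frames():
    for df in dataset.my_table.iter_df(chunk_size=500):
        df["age"] = df["age"] + 1  # hypothetical transformation
        yield df

# write the transformed chunks to a new table
dataset.save(transform_frames(), table_name="my_table_transformed")
```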
@@ -141,14 +142,14 @@ You can switch to a different profile using the `switch_profile` function.
 Here’s an example:
 
 ```py
-from dlt.hub.current import project
+from dlt.hub.current import project  # type: ignore
 
 
 if __name__ == "__main__":
     # Shows the current active profile
     print(project.config().current_profile)
     # Switch to the tests profile
-    .context().switch_profile("tests")
+    project.context().switch_profile("tests")
     # Now "tests" is the active profile, merged with the project config
     print(project.config().current_profile)
 ```