
APIReader

Bases: BaseReader

Utility class for reading an API into a DataFrame.

This class uses an APIClient to fetch data from an API and load it into a Spark DataFrame.

Attributes:

- api_client (APIClient): The client for making API requests.
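For orientation, a minimal construction sketch (the import path and the use of requests' HTTPBasicAuth are assumptions; any AuthBase implementation can be passed):

from requests.auth import HTTPBasicAuth
from cloe_nessy.integration.reader.api_reader import APIReader  # assumed import path

# Build a reader that issues requests against the given base URL.
reader = APIReader(
    base_url="https://api.example.com",              # hypothetical API
    auth=HTTPBasicAuth("user", "secret"),            # any requests AuthBase works
    default_headers={"Accept": "application/json"},
)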

Source code in src/cloe_nessy/integration/reader/api_reader.py
class APIReader(BaseReader):
    """Utility class for reading an API into a DataFrame.

    This class uses an APIClient to fetch data from an API and load it into a Spark DataFrame.

    Attributes:
        api_client: The client for making API requests.
    """

    def __init__(self, base_url: str, auth: AuthBase | None, default_headers: dict[str, str] | None = None):
        """Initializes the APIReader object.

        Args:
            base_url: The base URL for the API.
            auth: The authentication method for the API.
            default_headers: Default headers to include in requests.
        """
        super().__init__()
        self.api_client = APIClient(base_url, auth, default_headers)

    def read(
        self,
        *,
        endpoint: str = "",
        method: str = "GET",
        key: str | None = None,
        timeout: int = 30,
        params: dict[str, str] | None = None,
        headers: dict[str, str] | None = None,
        data: dict[str, str] | None = None,
        json_body: dict[str, str] | None = None,
        max_retries: int = 0,
        options: dict[str, str] | None = None,
        add_metadata_column: bool = False,
        **kwargs: Any,
    ) -> DataFrame:
        """Reads data from an API endpoint and returns it as a DataFrame.

        Args:
            endpoint: The endpoint to send the request to.
            method: The HTTP method to use for the request.
            key: The key to extract from the JSON response.
            timeout: The timeout for the request in seconds.
            params: The query parameters for the request.
            headers: The headers to include in the request.
            data: The form data to include in the request.
            json_body: The JSON data to include in the request.
            max_retries: The maximum number of retries for the request.
            options: Additional options for the createDataFrame function.
            add_metadata_column: If set, adds a __metadata column containing metadata about the API response.
            **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

        Returns:
            DataFrame: The Spark DataFrame containing the read data in the json_object column.

        Raises:
            RuntimeError: If there is an error with the API request or reading the data.
        """
        if options is None:
            options = {}
        try:
            response = self.api_client.request(
                method=method,
                endpoint=endpoint,
                timeout=timeout,
                params=params,
                headers=headers,
                data=data,
                json=json_body,
                max_retries=max_retries,
            )
            data_list = response.to_dict(key)
            json_string = json.dumps(data_list)
            df: DataFrame = self._spark.createDataFrame(data={json_string}, schema=["json_string"], **options)  # type: ignore
            row = df.select("json_string").head()
            if row is not None:
                schema = F.schema_of_json(row[0])
            else:
                raise RuntimeError("It was not possible to infer the schema of the JSON data.")
            df_result = df.withColumn("json_object", F.from_json("json_string", schema)).select("json_object")
            if add_metadata_column:
                df_result = self._add_metadata_column(df_result, response)
            return df_result

        except (APIClientHTTPError, APIClientConnectionError, APIClientTimeoutError) as e:
            raise RuntimeError(f"API request failed: {e}") from e
        except APIClientError as e:
            raise RuntimeError(f"An error occurred while reading the API data: {e}") from e
        except Exception as e:
            raise RuntimeError(f"An unexpected error occurred: {e}") from e

    def _add_metadata_column(self, df: DataFrame, response: APIResponse):
        """Adds a metadata column to a DataFrame.

        This method appends a column named `__metadata` to the given DataFrame, containing a map
        of metadata related to an API response. The metadata includes the current timestamp,
        the base URL of the API, the URL of the request, the HTTP status code, the reason phrase,
        and the elapsed time of the request in seconds.

        Args:
            df: The DataFrame to which the metadata column will be added.
            response: The API response object containing the metadata to be added.

        Returns:
            DataFrame: The original DataFrame with an added `__metadata` column containing the API response metadata.
        """
        df = df.withColumn(
            "__metadata",
            F.create_map(
                F.lit("timestamp"),
                F.current_timestamp(),
                F.lit("base_url"),
                F.lit(self.api_client.base_url),
                F.lit("url"),
                F.lit(response.url),
                F.lit("status_code"),
                F.lit(response.status_code),
                F.lit("reason"),
                F.lit(response.reason),
                F.lit("elapsed"),
                F.lit(response.elapsed),
            ),
        )
        return df

__init__(base_url, auth, default_headers=None)

Initializes the APIReader object.

Parameters:

- base_url (str, required): The base URL for the API.
- auth (AuthBase | None, required): The authentication method for the API.
- default_headers (dict[str, str] | None, default: None): Default headers to include in requests.
Source code in src/cloe_nessy/integration/reader/api_reader.py
def __init__(self, base_url: str, auth: AuthBase | None, default_headers: dict[str, str] | None = None):
    """Initializes the APIReader object.

    Args:
        base_url: The base URL for the API.
        auth: The authentication method for the API.
        default_headers: Default headers to include in requests.
    """
    super().__init__()
    self.api_client = APIClient(base_url, auth, default_headers)

_add_metadata_column(df, response)

Adds a metadata column to a DataFrame.

This method appends a column named __metadata to the given DataFrame, containing a map of metadata related to an API response. The metadata includes the current timestamp, the base URL of the API, the URL of the request, the HTTP status code, the reason phrase, and the elapsed time of the request in seconds.

Parameters:

- df (DataFrame, required): The DataFrame to which the metadata column will be added.
- response (APIResponse, required): The API response object containing the metadata to be added.

Returns:

- DataFrame: The original DataFrame with an added __metadata column containing the API response metadata.
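For illustration, individual entries of the resulting map column can be read back with plain PySpark. A sketch, assuming a DataFrame df that already carries the __metadata column:

from pyspark.sql import functions as F

# Pull selected entries out of the __metadata map column.
df.select(
    F.col("__metadata")["status_code"].alias("status_code"),
    F.col("__metadata")["elapsed"].alias("elapsed_seconds"),
).show()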

Source code in src/cloe_nessy/integration/reader/api_reader.py
def _add_metadata_column(self, df: DataFrame, response: APIResponse):
    """Adds a metadata column to a DataFrame.

    This method appends a column named `__metadata` to the given DataFrame, containing a map
    of metadata related to an API response. The metadata includes the current timestamp,
    the base URL of the API, the URL of the request, the HTTP status code, the reason phrase,
    and the elapsed time of the request in seconds.

    Args:
        df: The DataFrame to which the metadata column will be added.
        response: The API response object containing the metadata to be added.

    Returns:
        DataFrame: The original DataFrame with an added `__metadata` column containing the API response metadata.
    """
    df = df.withColumn(
        "__metadata",
        F.create_map(
            F.lit("timestamp"),
            F.current_timestamp(),
            F.lit("base_url"),
            F.lit(self.api_client.base_url),
            F.lit("url"),
            F.lit(response.url),
            F.lit("status_code"),
            F.lit(response.status_code),
            F.lit("reason"),
            F.lit(response.reason),
            F.lit("elapsed"),
            F.lit(response.elapsed),
        ),
    )
    return df

read(*, endpoint='', method='GET', key=None, timeout=30, params=None, headers=None, data=None, json_body=None, max_retries=0, options=None, add_metadata_column=False, **kwargs)

Reads data from an API endpoint and returns it as a DataFrame.

Parameters:

- endpoint (str, default: ''): The endpoint to send the request to.
- method (str, default: 'GET'): The HTTP method to use for the request.
- key (str | None, default: None): The key to extract from the JSON response.
- timeout (int, default: 30): The timeout for the request in seconds.
- params (dict[str, str] | None, default: None): The query parameters for the request.
- headers (dict[str, str] | None, default: None): The headers to include in the request.
- data (dict[str, str] | None, default: None): The form data to include in the request.
- json_body (dict[str, str] | None, default: None): The JSON data to include in the request.
- max_retries (int, default: 0): The maximum number of retries for the request.
- options (dict[str, str] | None, default: None): Additional options for the createDataFrame function.
- add_metadata_column (bool, default: False): If set, adds a __metadata column containing metadata about the API response.
- **kwargs (Any, default: {}): Additional keyword arguments to maintain compatibility with the base class method.

Returns:

- DataFrame: The Spark DataFrame containing the read data in the json_object column.

Raises:

- RuntimeError: If there is an error with the API request or reading the data.
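A sketch of a typical call, with reader being an APIReader as constructed above (the endpoint name and the response key are hypothetical):

# Fetch /users, keep the "items" key of the JSON payload, retry twice on failure.
df = reader.read(
    endpoint="users",
    key="items",
    timeout=60,
    max_retries=2,
    add_metadata_column=True,
)

# The parsed payload lands in a single struct column named json_object.
df.select("json_object").printSchema()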

Source code in src/cloe_nessy/integration/reader/api_reader.py
def read(
    self,
    *,
    endpoint: str = "",
    method: str = "GET",
    key: str | None = None,
    timeout: int = 30,
    params: dict[str, str] | None = None,
    headers: dict[str, str] | None = None,
    data: dict[str, str] | None = None,
    json_body: dict[str, str] | None = None,
    max_retries: int = 0,
    options: dict[str, str] | None = None,
    add_metadata_column: bool = False,
    **kwargs: Any,
) -> DataFrame:
    """Reads data from an API endpoint and returns it as a DataFrame.

    Args:
        endpoint: The endpoint to send the request to.
        method: The HTTP method to use for the request.
        key: The key to extract from the JSON response.
        timeout: The timeout for the request in seconds.
        params: The query parameters for the request.
        headers: The headers to include in the request.
        data: The form data to include in the request.
        json_body: The JSON data to include in the request.
        max_retries: The maximum number of retries for the request.
        options: Additional options for the createDataFrame function.
        add_metadata_column: If set, adds a __metadata column containing metadata about the API response.
        **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

    Returns:
        DataFrame: The Spark DataFrame containing the read data in the json_object column.

    Raises:
        RuntimeError: If there is an error with the API request or reading the data.
    """
    if options is None:
        options = {}
    try:
        response = self.api_client.request(
            method=method,
            endpoint=endpoint,
            timeout=timeout,
            params=params,
            headers=headers,
            data=data,
            json=json_body,
            max_retries=max_retries,
        )
        data_list = response.to_dict(key)
        json_string = json.dumps(data_list)
        df: DataFrame = self._spark.createDataFrame(data={json_string}, schema=["json_string"], **options)  # type: ignore
        row = df.select("json_string").head()
        if row is not None:
            schema = F.schema_of_json(row[0])
        else:
            raise RuntimeError("It was not possible to infer the schema of the JSON data.")
        df_result = df.withColumn("json_object", F.from_json("json_string", schema)).select("json_object")
        if add_metadata_column:
            df_result = self._add_metadata_column(df_result, response)
        return df_result

    except (APIClientHTTPError, APIClientConnectionError, APIClientTimeoutError) as e:
        raise RuntimeError(f"API request failed: {e}") from e
    except APIClientError as e:
        raise RuntimeError(f"An error occurred while reading the API data: {e}") from e
    except Exception as e:
        raise RuntimeError(f"An unexpected error occurred: {e}") from e

CatalogReader

Bases: BaseReader

A reader for Unity Catalog objects.

This class reads data from a Unity Catalog table and loads it into a Spark DataFrame.

Source code in src/cloe_nessy/integration/reader/catalog_reader.py
class CatalogReader(BaseReader):
    """A reader for Unity Catalog objects.

    This class reads data from a Unity Catalog table and loads it into a Spark DataFrame.
    """

    def __init__(self):
        """Initializes the CatalogReader object."""
        super().__init__()

    def read(
        self,
        table_identifier: str = "",
        *,
        options: dict[str, str] | None = None,
        delta_load_options: DeltaLoadOptions | None = None,
        **kwargs: Any,
    ) -> DataFrame:
        """Reads a table from the Unity Catalog.

        Args:
            table_identifier: The table identifier in the Unity Catalog in the format 'catalog.schema.table'.
            options: PySpark options for the read table operation.
            delta_load_options: Options for delta loading, if applicable. When provided, uses delta loader
                instead of regular table read to perform incremental loading.
            **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

        Returns:
            The Spark DataFrame containing the read data.

        Raises:
            ValueError: If the table_identifier is not provided, is not a string, or is not in the correct format.
            ReadOperationFailedError: For delta load or table read failures.
        """
        if options is None:
            options = {}
        if not table_identifier:
            raise ValueError("table_identifier is required")
        if not isinstance(table_identifier, str):
            raise ValueError("table_identifier must be a string")
        if len(table_identifier.split(".")) != 3:
            raise ValueError("table_identifier must be in the format 'catalog.schema.table'")

        try:
            if delta_load_options:
                # Use delta loader for incremental loading
                self._console_logger.info(f"Performing delta load for table: {table_identifier}")
                delta_loader = DeltaLoaderFactory.create_loader(
                    table_identifier=table_identifier,
                    options=delta_load_options,
                )
                df = delta_loader.read_data(options=options)
                self._console_logger.info(f"Delta load completed for table: {table_identifier}")
                return df

            # Regular table read
            df = self._spark.read.table(table_identifier, **options)
            return df
        except AnalysisException as err:
            raise ValueError(f"Table not found: {table_identifier}") from err
        except Exception as err:
            if delta_load_options:
                raise ReadOperationFailedError(f"Delta load failed for table '{table_identifier}': {err}") from err
            else:
                raise ReadOperationFailedError(
                    f"An error occurred while reading the table '{table_identifier}': {err}"
                ) from err

__init__()

Initializes the CatalogReader object.

Source code in src/cloe_nessy/integration/reader/catalog_reader.py
def __init__(self):
    """Initializes the CatalogReader object."""
    super().__init__()

read(table_identifier='', *, options=None, delta_load_options=None, **kwargs)

Reads a table from the Unity Catalog.

Parameters:

- table_identifier (str, default: ''): The table identifier in the Unity Catalog in the format 'catalog.schema.table'.
- options (dict[str, str] | None, default: None): PySpark options for the read table operation.
- delta_load_options (DeltaLoadOptions | None, default: None): Options for delta loading, if applicable. When provided, uses delta loader instead of regular table read to perform incremental loading.
- **kwargs (Any, default: {}): Additional keyword arguments to maintain compatibility with the base class method.

Returns:

- DataFrame: The Spark DataFrame containing the read data.

Raises:

- ValueError: If the table_identifier is not provided, is not a string, or is not in the correct format.
- ReadOperationFailedError: For delta load or table read failures.
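A minimal usage sketch (the import path and the table name are assumptions):

from cloe_nessy.integration.reader.catalog_reader import CatalogReader  # assumed import path

reader = CatalogReader()

# Plain read of a Unity Catalog table; the identifier must have exactly three parts.
df = reader.read("my_catalog.my_schema.my_table")

# Passing delta_load_options switches to the incremental delta loader
# built by DeltaLoaderFactory instead of a plain table read.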

Source code in src/cloe_nessy/integration/reader/catalog_reader.py
def read(
    self,
    table_identifier: str = "",
    *,
    options: dict[str, str] | None = None,
    delta_load_options: DeltaLoadOptions | None = None,
    **kwargs: Any,
) -> DataFrame:
    """Reads a table from the Unity Catalog.

    Args:
        table_identifier: The table identifier in the Unity Catalog in the format 'catalog.schema.table'.
        options: PySpark options for the read table operation.
        delta_load_options: Options for delta loading, if applicable. When provided, uses delta loader
            instead of regular table read to perform incremental loading.
        **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

    Returns:
        The Spark DataFrame containing the read data.

    Raises:
        ValueError: If the table_identifier is not provided, is not a string, or is not in the correct format.
        ReadOperationFailedError: For delta load or table read failures.
    """
    if options is None:
        options = {}
    if not table_identifier:
        raise ValueError("table_identifier is required")
    if not isinstance(table_identifier, str):
        raise ValueError("table_identifier must be a string")
    if len(table_identifier.split(".")) != 3:
        raise ValueError("table_identifier must be in the format 'catalog.schema.table'")

    try:
        if delta_load_options:
            # Use delta loader for incremental loading
            self._console_logger.info(f"Performing delta load for table: {table_identifier}")
            delta_loader = DeltaLoaderFactory.create_loader(
                table_identifier=table_identifier,
                options=delta_load_options,
            )
            df = delta_loader.read_data(options=options)
            self._console_logger.info(f"Delta load completed for table: {table_identifier}")
            return df

        # Regular table read
        df = self._spark.read.table(table_identifier, **options)
        return df
    except AnalysisException as err:
        raise ValueError(f"Table not found: {table_identifier}") from err
    except Exception as err:
        if delta_load_options:
            raise ReadOperationFailedError(f"Delta load failed for table '{table_identifier}': {err}") from err
        else:
            raise ReadOperationFailedError(
                f"An error occurred while reading the table '{table_identifier}': {err}"
            ) from err

ExcelDataFrameReader

Bases: BaseReader

Utility class for reading an Excel file into a DataFrame.

This class uses the Pandas API on Spark to read Excel files to a DataFrame. More information can be found in the official documentation: https://spark.apache.org/docs/latest/api/python/reference/pyspark.pandas/index.html
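A minimal usage sketch (the import path and the file location are assumptions):

from cloe_nessy.integration.reader.excel_reader import ExcelDataFrameReader  # assumed import path

reader = ExcelDataFrameReader()

# Read the first sheet of an .xlsx file into a Spark DataFrame.
df = reader.read("/mnt/data/report.xlsx")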

Source code in src/cloe_nessy/integration/reader/excel_reader.py
class ExcelDataFrameReader(BaseReader):
    """Utility class for reading an Excel file into a DataFrame.

    This class uses the Pandas API on Spark to read Excel files to a DataFrame.
    More information can be found in the [official
    documentation](https://spark.apache.org/docs/latest/api/python/reference/pyspark.pandas/index.html).
    """

    def __init__(self):
        """Initializes the ExcelDataFrameReader object."""
        super().__init__()

    def read_stream(self) -> DataFrame:
        """Currently not implemented."""
        raise NotImplementedError("Currently not implemented.")

    def read(
        self,
        location: str,
        *,
        sheet_name: str | int | list = 0,
        header: int | list[int] = 0,
        index_col: int | list[int] | None = None,
        usecols: int | str | list | Callable | None = None,
        true_values: list | None = None,
        false_values: list | None = None,
        nrows: int | None = None,
        na_values: list[str] | dict[str, list[str]] | None = None,
        keep_default_na: bool = True,
        parse_dates: bool | list | dict = False,
        date_parser: Callable | None = None,
        thousands: str | None = None,
        options: dict | None = None,
        load_as_strings: bool = False,
        add_metadata_column: bool = False,
        **kwargs: Any,
    ) -> DataFrame:
        """Reads Excel file on specified location and returns DataFrame.

        Args:
            location: Location of files to read.
            sheet_name: Strings are used for sheet names.
                Integers are used in zero-indexed sheet positions. Lists of
                strings/integers are used to request multiple sheets. Specify None
                to get all sheets.
            header: Row to use for column labels. If a
                list of integers is passed those row positions will be combined. Use
                None if there is no header.
            index_col: Column to use as the row labels of the
                DataFrame. Pass None if there is no such column. If a list is
                passed, those columns will be combined.
            usecols: Return a subset of the columns. If
                None, then parse all columns. If str, then indicates comma separated
                list of Excel column letters and column ranges (e.g. “A:E” or
                “A,C,E:F”). Ranges are inclusive of both sides. If list of int,
                then indicates list of column numbers to be parsed. If list of
                string, then indicates list of column names to be parsed. If
                Callable, then evaluate each column name against it and parse the
                column if the Callable returns True.
            true_values: Values to consider as True.
            false_values: Values to consider as False.
            nrows: Number of rows to parse.
            na_values: Additional strings to recognize as
                NA/NaN. If dict passed, specific per-column NA values.
            keep_default_na: If na_values are specified and
                keep_default_na is False the default NaN values are overridden,
                otherwise they're appended to.
            parse_dates: The behavior is as follows:
                - bool. If True -> try parsing the index.
                - list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.
                - list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.
                - dict, e.g. {{"foo" : [1, 3]}} -> parse columns 1, 3 as date and call result "foo"
                If a column or index contains an unparseable date, the entire column or index will be returned unaltered as an object data type.
            date_parser: Function to use for converting a sequence of
                string columns to an array of datetime instances. The default uses
                dateutil.parser.parser to do the conversion.
            thousands: Thousands separator for parsing string columns to
                numeric. Note that this parameter is only necessary for columns
                stored as TEXT in Excel, any numeric columns will automatically be
                parsed, regardless of display format.
            options: Optional keyword arguments passed to
                pyspark.pandas.read_excel and handed to TextFileReader.
            load_as_strings: If True, converts all columns to string type to avoid datatype conversion errors in Spark.
            add_metadata_column: If True, adds a metadata column containing the file location and sheet name.
            **kwargs: Additional keyword arguments to maintain compatibility with the base class method.
        """
        if options is None:
            options = {}
        if ".xls" not in location:
            raise ValueError(
                "The excel reader can only be used for files with extension .xls. Use FileReader or some other reader instead."
            )
        try:
            df = pd.read_excel(  # type: ignore
                location,
                sheet_name=sheet_name,
                header=header,
                index_col=index_col,
                usecols=usecols,
                true_values=true_values,
                false_values=false_values,
                nrows=nrows,
                na_values=na_values,
                keep_default_na=keep_default_na,
                parse_dates=parse_dates,
                date_parser=date_parser,
                thousands=thousands,
                dtype="string" if load_as_strings else None,
                **options,
            )
            if isinstance(df, dict):
                # in case pandas.read_excel returns a dict, union to single df
                df = pd.concat(list(df.values()), ignore_index=True)

        except FileNotFoundError:
            self._console_logger.error(f"No xls(x) file was found at the specified location [ '{location}' ].")
            raise
        except Exception as e:
            self._console_logger.error(f"read file [ '{location}' ] failed. Error: {e}")
        else:
            self._console_logger.info(f"Read file [ '{location}' ] succeeded.")

        spark_df = self._spark.createDataFrame(df)
        if add_metadata_column:
            spark_df = self._add_metadata_column(df=spark_df, location=location, sheet_name=sheet_name)
        return spark_df

    def _add_metadata_column(self, df: DataFrame, location: str, sheet_name: str | int | list):
        """Adds a metadata column to a DataFrame.

        This method appends a column named `__metadata` to the given DataFrame, containing a map
        of metadata related to the Excel file read operation. The metadata includes the current
        timestamp, the location of the Excel file, and the sheet name(s) from which the data was read.

        Args:
            df: The DataFrame to which the metadata column will be added.
            location: The file path of the Excel file.
            sheet_name: The sheet name or sheet index used when reading the Excel file.

        Returns:
            DataFrame: The original DataFrame with an added `__metadata` column containing the Excel file metadata.
        """
        # Convert sheet_name to string if it is not already a string
        if isinstance(sheet_name, list):
            sheet_name = ", ".join(map(str, sheet_name))
        else:
            sheet_name = str(sheet_name)

        df = df.withColumn(
            "__metadata",
            F.create_map(
                F.lit("timestamp"),
                F.current_timestamp(),
                F.lit("file_location"),
                F.lit(location),
                F.lit("sheet_name"),
                F.lit(sheet_name),
            ),
        )
        return df

__init__()

Initializes the ExcelDataFrameReader object.

Source code in src/cloe_nessy/integration/reader/excel_reader.py
def __init__(self):
    """Initializes the ExcelDataFrameReader object."""
    super().__init__()

_add_metadata_column(df, location, sheet_name)

Adds a metadata column to a DataFrame.

This method appends a column named __metadata to the given DataFrame, containing a map of metadata related to the Excel file read operation. The metadata includes the current timestamp, the location of the Excel file, and the sheet name(s) from which the data was read.

Parameters:

- df (DataFrame, required): The DataFrame to which the metadata column will be added.
- location (str, required): The file path of the Excel file.
- sheet_name (str | int | list, required): The sheet name or sheet index used when reading the Excel file.

Returns:

- DataFrame: The original DataFrame with an added __metadata column containing the Excel file metadata.

Source code in src/cloe_nessy/integration/reader/excel_reader.py
def _add_metadata_column(self, df: DataFrame, location: str, sheet_name: str | int | list):
    """Adds a metadata column to a DataFrame.

    This method appends a column named `__metadata` to the given DataFrame, containing a map
    of metadata related to the Excel file read operation. The metadata includes the current
    timestamp, the location of the Excel file, and the sheet name(s) from which the data was read.

    Args:
        df: The DataFrame to which the metadata column will be added.
        location: The file path of the Excel file.
        sheet_name: The sheet name or sheet index used when reading the Excel file.

    Returns:
        DataFrame: The original DataFrame with an added `__metadata` column containing the Excel file metadata.
    """
    # Convert sheet_name to string if it is not already a string
    if isinstance(sheet_name, list):
        sheet_name = ", ".join(map(str, sheet_name))
    else:
        sheet_name = str(sheet_name)

    df = df.withColumn(
        "__metadata",
        F.create_map(
            F.lit("timestamp"),
            F.current_timestamp(),
            F.lit("file_location"),
            F.lit(location),
            F.lit("sheet_name"),
            F.lit(sheet_name),
        ),
    )
    return df

read(location, *, sheet_name=0, header=0, index_col=None, usecols=None, true_values=None, false_values=None, nrows=None, na_values=None, keep_default_na=True, parse_dates=False, date_parser=None, thousands=None, options=None, load_as_strings=False, add_metadata_column=False, **kwargs)

Reads Excel file on specified location and returns DataFrame.

Parameters:

- location (str, required): Location of files to read.
- sheet_name (str | int | list, default: 0): Strings are used for sheet names. Integers are used in zero-indexed sheet positions. Lists of strings/integers are used to request multiple sheets. Specify None to get all sheets.
- header (int | list[int], default: 0): Row to use for column labels. If a list of integers is passed, those row positions will be combined. Use None if there is no header.
- index_col (int | list[int] | None, default: None): Column to use as the row labels of the DataFrame. Pass None if there is no such column. If a list is passed, those columns will be combined.
- usecols (int | str | list | Callable | None, default: None): Return a subset of the columns. If None, parse all columns. If str, indicates a comma-separated list of Excel column letters and column ranges (e.g. "A:E" or "A,C,E:F"); ranges are inclusive of both sides. If list of int, indicates a list of column numbers to be parsed. If list of string, indicates a list of column names to be parsed. If Callable, each column name is evaluated against it and the column is parsed if the Callable returns True.
- true_values (list | None, default: None): Values to consider as True.
- false_values (list | None, default: None): Values to consider as False.
- nrows (int | None, default: None): Number of rows to parse.
- na_values (list[str] | dict[str, list[str]] | None, default: None): Additional strings to recognize as NA/NaN. If a dict is passed, specific per-column NA values.
- keep_default_na (bool, default: True): If na_values are specified and keep_default_na is False, the default NaN values are overridden; otherwise they are appended to.
- parse_dates (bool | list | dict, default: False): The behavior is as follows:
    - bool: if True, try parsing the index.
    - list of int or names: e.g. [1, 2, 3] tries parsing columns 1, 2, 3 each as a separate date column.
    - list of lists: e.g. [[1, 3]] combines columns 1 and 3 and parses them as a single date column.
    - dict: e.g. {"foo": [1, 3]} parses columns 1 and 3 as a date and names the result "foo".
    If a column or index contains an unparseable date, the entire column or index will be returned unaltered as an object data type.
- date_parser (Callable | None, default: None): Function to use for converting a sequence of string columns to an array of datetime instances. The default uses dateutil.parser.parser to do the conversion.
- thousands (str | None, default: None): Thousands separator for parsing string columns to numeric. Note that this parameter is only necessary for columns stored as TEXT in Excel; any numeric columns will automatically be parsed, regardless of display format.
- options (dict | None, default: None): Optional keyword arguments passed to pyspark.pandas.read_excel and handed to TextFileReader.
- load_as_strings (bool, default: False): If True, converts all columns to string type to avoid datatype conversion errors in Spark.
- add_metadata_column (bool, default: False): If True, adds a metadata column containing the file location and sheet name.
- **kwargs (Any, default: {}): Additional keyword arguments to maintain compatibility with the base class method.
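A sketch combining several of these parameters, with reader as above (the file path and sheet names are hypothetical):

# Read two named sheets, keep every column as a string, and record provenance.
df = reader.read(
    "/mnt/data/sales_2024.xlsx",
    sheet_name=["Q1", "Q2"],   # sheets are concatenated into a single DataFrame
    usecols="A:F",
    load_as_strings=True,      # avoids Spark datatype conversion errors
    add_metadata_column=True,  # records timestamp, file_location and sheet_name
)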
Source code in src/cloe_nessy/integration/reader/excel_reader.py
def read(
    self,
    location: str,
    *,
    sheet_name: str | int | list = 0,
    header: int | list[int] = 0,
    index_col: int | list[int] | None = None,
    usecols: int | str | list | Callable | None = None,
    true_values: list | None = None,
    false_values: list | None = None,
    nrows: int | None = None,
    na_values: list[str] | dict[str, list[str]] | None = None,
    keep_default_na: bool = True,
    parse_dates: bool | list | dict = False,
    date_parser: Callable | None = None,
    thousands: str | None = None,
    options: dict | None = None,
    load_as_strings: bool = False,
    add_metadata_column: bool = False,
    **kwargs: Any,
) -> DataFrame:
    """Reads Excel file on specified location and returns DataFrame.

    Args:
        location: Location of files to read.
        sheet_name: Strings are used for sheet names.
            Integers are used in zero-indexed sheet positions. Lists of
            strings/integers are used to request multiple sheets. Specify None
            to get all sheets.
        header: Row to use for column labels. If a
            list of integers is passed those row positions will be combined. Use
            None if there is no header.
        index_col: Column to use as the row labels of the
            DataFrame. Pass None if there is no such column. If a list is
            passed, those columns will be combined.
        usecols: Return a subset of the columns. If
            None, then parse all columns. If str, then indicates comma separated
            list of Excel column letters and column ranges (e.g. “A:E” or
            “A,C,E:F”). Ranges are inclusive of both sides. If list of int,
            then indicates list of column numbers to be parsed. If list of
            string, then indicates list of column names to be parsed. If
            Callable, then evaluate each column name against it and parse the
            column if the Callable returns True.
        true_values: Values to consider as True.
        false_values: Values to consider as False.
        nrows: Number of rows to parse.
        na_values: Additional strings to recognize as
            NA/NaN. If dict passed, specific per-column NA values.
        keep_default_na: If na_values are specified and
            keep_default_na is False the default NaN values are overridden,
            otherwise they're appended to.
        parse_dates: The behavior is as follows:
            - bool. If True -> try parsing the index.
            - list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.
            - list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.
            - dict, e.g. {{"foo" : [1, 3]}} -> parse columns 1, 3 as date and call result "foo"
            If a column or index contains an unparseable date, the entire column or index will be returned unaltered as an object data type.
        date_parser: Function to use for converting a sequence of
            string columns to an array of datetime instances. The default uses
            dateutil.parser.parser to do the conversion.
        thousands: Thousands separator for parsing string columns to
            numeric. Note that this parameter is only necessary for columns
            stored as TEXT in Excel, any numeric columns will automatically be
            parsed, regardless of display format.
        options: Optional keyword arguments passed to
            pyspark.pandas.read_excel and handed to TextFileReader.
        load_as_strings: If True, converts all columns to string type to avoid datatype conversion errors in Spark.
        add_metadata_column: If True, adds a metadata column containing the file location and sheet name.
        **kwargs: Additional keyword arguments to maintain compatibility with the base class method.
    """
    if options is None:
        options = {}
    if ".xls" not in location:
        raise ValueError(
            "The excel reader can only be used for files with extension .xls. Use FileReader or some other reader instead."
        )
    try:
        df = pd.read_excel(  # type: ignore
            location,
            sheet_name=sheet_name,
            header=header,
            index_col=index_col,
            usecols=usecols,
            true_values=true_values,
            false_values=false_values,
            nrows=nrows,
            na_values=na_values,
            keep_default_na=keep_default_na,
            parse_dates=parse_dates,
            date_parser=date_parser,
            thousands=thousands,
            dtype="string" if load_as_strings else None,
            **options,
        )
        if isinstance(df, dict):
            # in case pandas.read_excel returns a dict, union to single df
            df = pd.concat(list(df.values()), ignore_index=True)

    except FileNotFoundError:
        self._console_logger.error(f"No xls(x) file was found at the specified location [ '{location}' ].")
        raise
    except Exception as e:
        self._console_logger.error(f"read file [ '{location}' ] failed. Error: {e}")
    else:
        self._console_logger.info(f"Read file [ '{location}' ] succeeded.")

    spark_df = self._spark.createDataFrame(df)
    if add_metadata_column:
        spark_df = self._add_metadata_column(df=spark_df, location=location, sheet_name=sheet_name)
    return spark_df

read_stream()

Currently not implemented.

Source code in src/cloe_nessy/integration/reader/excel_reader.py
def read_stream(self) -> DataFrame:
    """Currently not implemented."""
    raise NotImplementedError("Currently not implemented.")

FileReader

Bases: BaseReader

Utility class for reading a file into a DataFrame.

This class reads data from files and loads it into a Spark DataFrame.

Source code in src/cloe_nessy/integration/reader/file_reader.py
class FileReader(BaseReader):
    """Utility class for reading a file into a DataFrame.

    This class reads data from files and loads it into a Spark DataFrame.
    """

    def __init__(self):
        """Initializes the FileReader object."""
        super().__init__()

    def _get_reader(self) -> DataFrameReader:
        """Returns a DataFrameReader."""
        return self._spark.read

    def _get_stream_reader(self) -> DataStreamReader:
        """Returns a DataFrameReader."""
        return self._spark.readStream

    def read(
        self,
        location: str,
        *,
        spark_format: str | None = None,
        extension: str | None = None,
        schema: str | None = None,
        search_subdirs: bool = True,
        options: dict | None = None,
        add_metadata_column: bool = False,
        delta_load_options: DeltaLoadOptions | None = None,
        **kwargs: Any,
    ) -> DataFrame:
        """Reads files from a specified location and returns a DataFrame.

        Arguments:
            location: Location of files to read.
            spark_format: Format of files to read. If not provided, it will be inferred from the extension.
            extension: File extension (csv, json, parquet, txt). Used if spark_format is not provided.
            schema: Schema of the file. If None, schema will be inferred.
            search_subdirs: Whether to include files in subdirectories.
            options: Spark DataFrame reader options.
            add_metadata_column: Whether to include __metadata column in the DataFrame.
            delta_load_options: Options for delta loading, if applicable. When provided and spark_format is 'delta',
                uses delta loader for incremental loading of Delta Lake tables.
            **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

        Raises:
            ValueError: If neither spark_format nor extension is provided.
            ValueError: If the provided extension is not supported.
            Exception: If there is an error while reading the files.

        Note:
            - The `spark_format` parameter is used to specify the format of the files to be read.
            - If `spark_format` is not provided, the method will try to infer it from the `extension`.
            - The `extension` parameter is used to specify the file extension (e.g., 'csv', 'json', etc.).
            - If both `spark_format` and `extension` are provided, `spark_format` will take precedence.
            - The method will raise an error if neither `spark_format` nor `extension` is provided.

        Returns:
            A DataFrame containing the data from the files.
        """
        if options is None:
            options = {}

        if not spark_format and not extension:
            raise ValueError("Either spark_format or extension must be provided.")

        # Handle delta loading for Delta Lake tables
        if delta_load_options and (spark_format == "delta" or extension == "delta"):
            self._console_logger.info(f"Performing delta load for Delta table at: {location}")
            try:
                # For Delta tables, use location as table identifier for delta loader
                delta_loader = DeltaLoaderFactory.create_loader(
                    table_identifier=location,
                    options=delta_load_options,
                )
                df = delta_loader.read_data(options=options or {})
                self._console_logger.info(f"Delta load completed for: {location}")
                return df
            except Exception as e:
                self._console_logger.error(f"Delta load failed for '{location}': {e}")
                raise

        self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
        extension_to_datatype_dict = {
            "csv": "csv",
            "json": "json",
            "parquet": "parquet",
            "txt": "text",
            "xml": "xml",
            "delta": "delta",
        }

        if extension and not spark_format:
            if extension not in extension_to_datatype_dict:
                raise ValueError(f"Unsupported file extension: {extension}")
            spark_format = extension_to_datatype_dict[extension]
        self._console_logger.debug(f"Reading files with format: {spark_format}")
        if extension:
            file_paths = get_file_paths(location, extension, search_subdirs, onelake_relative_paths=True)
        else:
            file_paths = [location]
        self._console_logger.debug(f"Found {len(file_paths)} files to read")
        self._console_logger.debug(f"File paths: {file_paths}")
        assert spark_format is not None

        reader = self._get_reader().format(spark_format)
        if schema:
            reader.schema(schema)
        else:
            options["inferSchema"] = True

        self._console_logger.debug(f"Setting options: {options}")
        reader.options(**options)

        try:
            self._console_logger.debug("Loading files into DataFrame")
            df = reader.load([str(p) for p in file_paths])
            self._console_logger.debug("Successfully loaded files into DataFrame")
            if add_metadata_column:
                df = self._add_metadata_column(df)
        except Exception as e:
            self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
            raise
        else:
            self._console_logger.info(f"Successfully read files from [ '{location}' ]")
            return df

    def read_stream(
        self,
        location: str = "",
        schema: StructType | str | None = None,
        format: str = "delta",
        add_metadata_column: bool = False,
        options: dict[str, Any] | None = None,
        **_: Any,
    ) -> DataFrame:
        """Reads specified location as a stream and returns streaming DataFrame.

        Arguments:
            location: Location of files to read.
            format: Format of files to read.
            schema: Schema of the file.
            add_metadata_column: Whether to include __metadata column in the DataFrame.
            options: Spark DataFrame reader options.

        Raises:
            ValueError: If location is not provided.

        Returns:
            A Streaming DataFrame
        """
        if not location:
            raise ValueError("Location is required for streaming.")
        self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
        try:
            if options is None:
                options = {}
            reader = self._get_stream_reader()
            reader.format(format)
            reader.option("rescuedDataColumn", "_rescued_data")
            if schema is None:
                options["inferSchema"] = True
            else:
                reader.schema(schema)
            reader.options(**options)
            df = reader.load(location)
            if add_metadata_column:
                df = self._add_metadata_column(df)
        except Exception as e:
            self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
            raise
        else:
            self._console_logger.info(f"Successfully read files from [ '{location}' ]")
            return df

    def _add_metadata_column(self, df: DataFrame) -> DataFrame:
        """Add all metadata columns to the DataFrame."""
        metadata_columns = df.select("_metadata.*").columns

        entries = [(F.lit(field), F.col(f"_metadata.{field}")) for field in metadata_columns]
        flat_list = [item for tup in entries for item in tup]

        df = df.withColumn("__metadata", F.create_map(flat_list))

        return df

__init__()

Initializes the FileReader object.

Source code in src/cloe_nessy/integration/reader/file_reader.py
def __init__(self):
    """Initializes the FileReader object."""
    super().__init__()

_add_metadata_column(df)

Add all metadata columns to the DataFrame.
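This flattens Spark's per-file _metadata struct into a map, so the available keys depend on the Spark version and source format (typically fields such as file_path, file_name, file_size, and file_modification_time). A sketch of reading one entry back from a DataFrame produced with add_metadata_column=True:

from pyspark.sql import functions as F

# List the distinct source files that contributed rows.
df.select(F.col("__metadata")["file_path"]).distinct().show(truncate=False)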

Source code in src/cloe_nessy/integration/reader/file_reader.py
def _add_metadata_column(self, df: DataFrame) -> DataFrame:
    """Add all metadata columns to the DataFrame."""
    metadata_columns = df.select("_metadata.*").columns

    entries = [(F.lit(field), F.col(f"_metadata.{field}")) for field in metadata_columns]
    flat_list = [item for tup in entries for item in tup]

    df = df.withColumn("__metadata", F.create_map(flat_list))

    return df

_get_reader()

Returns a DataFrameReader.

Source code in src/cloe_nessy/integration/reader/file_reader.py
def _get_reader(self) -> DataFrameReader:
    """Returns a DataFrameReader."""
    return self._spark.read

_get_stream_reader()

Returns a DataStreamReader.

Source code in src/cloe_nessy/integration/reader/file_reader.py
def _get_stream_reader(self) -> DataStreamReader:
    """Returns a DataFrameReader."""
    return self._spark.readStream

read(location, *, spark_format=None, extension=None, schema=None, search_subdirs=True, options=None, add_metadata_column=False, delta_load_options=None, **kwargs)

Reads files from a specified location and returns a DataFrame.

Parameters:

- location (str, required): Location of files to read.
- spark_format (str | None, default: None): Format of files to read. If not provided, it will be inferred from the extension.
- extension (str | None, default: None): File extension (csv, json, parquet, txt). Used if spark_format is not provided.
- schema (str | None, default: None): Schema of the file. If None, schema will be inferred.
- search_subdirs (bool, default: True): Whether to include files in subdirectories.
- options (dict | None, default: None): Spark DataFrame reader options.
- add_metadata_column (bool, default: False): Whether to include __metadata column in the DataFrame.
- delta_load_options (DeltaLoadOptions | None, default: None): Options for delta loading, if applicable. When provided and spark_format is 'delta', uses delta loader for incremental loading of Delta Lake tables.
- **kwargs (Any, default: {}): Additional keyword arguments to maintain compatibility with the base class method.

Raises:

- ValueError: If neither spark_format nor extension is provided.
- ValueError: If the provided extension is not supported.
- Exception: If there is an error while reading the files.

Note
  • The spark_format parameter is used to specify the format of the files to be read.
  • If spark_format is not provided, the method will try to infer it from the extension.
  • The extension parameter is used to specify the file extension (e.g., 'csv', 'json', etc.).
  • If both spark_format and extension are provided, spark_format will take precedence.
  • The method will raise an error if neither spark_format nor extension is provided.

Returns:

- DataFrame: A DataFrame containing the data from the files.
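A sketch of a typical call (the import path, storage path, and CSV options are assumptions):

from cloe_nessy.integration.reader.file_reader import FileReader  # assumed import path

reader = FileReader()

# Read all CSV files under the folder (including subdirectories) with a header row.
df = reader.read(
    "abfss://landing@mystorage.dfs.core.windows.net/exports/",
    extension="csv",
    options={"header": "true", "delimiter": ";"},
    add_metadata_column=True,
)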

Source code in src/cloe_nessy/integration/reader/file_reader.py
def read(
    self,
    location: str,
    *,
    spark_format: str | None = None,
    extension: str | None = None,
    schema: str | None = None,
    search_subdirs: bool = True,
    options: dict | None = None,
    add_metadata_column: bool = False,
    delta_load_options: DeltaLoadOptions | None = None,
    **kwargs: Any,
) -> DataFrame:
    """Reads files from a specified location and returns a DataFrame.

    Arguments:
        location: Location of files to read.
        spark_format: Format of files to read. If not provided, it will be inferred from the extension.
        extension: File extension (csv, json, parquet, txt). Used if spark_format is not provided.
        schema: Schema of the file. If None, schema will be inferred.
        search_subdirs: Whether to include files in subdirectories.
        options: Spark DataFrame reader options.
        add_metadata_column: Whether to include __metadata column in the DataFrame.
        delta_load_options: Options for delta loading, if applicable. When provided and spark_format is 'delta',
            uses delta loader for incremental loading of Delta Lake tables.
        **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

    Raises:
        ValueError: If neither spark_format nor extension is provided.
        ValueError: If the provided extension is not supported.
        Exception: If there is an error while reading the files.

    Note:
        - The `spark_format` parameter is used to specify the format of the files to be read.
        - If `spark_format` is not provided, the method will try to infer it from the `extension`.
        - The `extension` parameter is used to specify the file extension (e.g., 'csv', 'json', etc.).
        - If both `spark_format` and `extension` are provided, `spark_format` will take precedence.
        - The method will raise an error if neither `spark_format` nor `extension` is provided.

    Returns:
        A DataFrame containing the data from the files.
    """
    if options is None:
        options = {}

    if not spark_format and not extension:
        raise ValueError("Either spark_format or extension must be provided.")

    # Handle delta loading for Delta Lake tables
    if delta_load_options and (spark_format == "delta" or extension == "delta"):
        self._console_logger.info(f"Performing delta load for Delta table at: {location}")
        try:
            # For Delta tables, use location as table identifier for delta loader
            delta_loader = DeltaLoaderFactory.create_loader(
                table_identifier=location,
                options=delta_load_options,
            )
            df = delta_loader.read_data(options=options or {})
            self._console_logger.info(f"Delta load completed for: {location}")
            return df
        except Exception as e:
            self._console_logger.error(f"Delta load failed for '{location}': {e}")
            raise

    self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
    extension_to_datatype_dict = {
        "csv": "csv",
        "json": "json",
        "parquet": "parquet",
        "txt": "text",
        "xml": "xml",
        "delta": "delta",
    }

    if extension and not spark_format:
        if extension not in extension_to_datatype_dict:
            raise ValueError(f"Unsupported file extension: {extension}")
        spark_format = extension_to_datatype_dict[extension]
    self._console_logger.debug(f"Reading files with format: {spark_format}")
    if extension:
        file_paths = get_file_paths(location, extension, search_subdirs, onelake_relative_paths=True)
    else:
        file_paths = [location]
    self._console_logger.debug(f"Found {len(file_paths)} files to read")
    self._console_logger.debug(f"File paths: {file_paths}")
    assert spark_format is not None

    reader = self._get_reader().format(spark_format)
    if schema:
        reader.schema(schema)
    else:
        options["inferSchema"] = True

    self._console_logger.debug(f"Setting options: {options}")
    reader.options(**options)

    try:
        self._console_logger.debug("Loading files into DataFrame")
        df = reader.load([str(p) for p in file_paths])
        self._console_logger.debug("Successfully loaded files into DataFrame")
        if add_metadata_column:
            df = self._add_metadata_column(df)
    except Exception as e:
        self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
        raise
    else:
        self._console_logger.info(f"Successfully read files from [ '{location}' ]")
        return df

read_stream(location='', schema=None, format='delta', add_metadata_column=False, options=None, **_)

Reads specified location as a stream and returns streaming DataFrame.

Parameters:

- location (str, default: ''): Location of files to read.
- format (str, default: 'delta'): Format of files to read.
- schema (StructType | str | None, default: None): Schema of the file.
- add_metadata_column (bool, default: False): Whether to include __metadata column in the DataFrame.
- options (dict[str, Any] | None, default: None): Spark DataFrame reader options.

Raises:

- ValueError: If location is not provided.

Returns:

- DataFrame: A streaming DataFrame.
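A sketch of a streaming read, with reader being a FileReader as above (the paths are hypothetical; the writeStream part is standard Structured Streaming, not part of this class):

stream_df = reader.read_stream(
    location="abfss://landing@mystorage.dfs.core.windows.net/events/",
    format="delta",
)

# Write the stream out with a checkpoint; availableNow processes the backlog and stops.
(
    stream_df.writeStream
    .option("checkpointLocation", "abfss://landing@mystorage.dfs.core.windows.net/_checkpoints/events/")
    .trigger(availableNow=True)
    .toTable("my_catalog.my_schema.events")
)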

Source code in src/cloe_nessy/integration/reader/file_reader.py
def read_stream(
    self,
    location: str = "",
    schema: StructType | str | None = None,
    format: str = "delta",
    add_metadata_column: bool = False,
    options: dict[str, Any] | None = None,
    **_: Any,
) -> DataFrame:
    """Reads specified location as a stream and returns streaming DataFrame.

    Arguments:
        location: Location of files to read.
        format: Format of files to read.
        schema: Schema of the file.
        add_metadata_column: Whether to include __metadata column in the DataFrame.
        options: Spark DataFrame reader options.

    Raises:
        ValueError: If location is not provided.

    Returns:
        A Streaming DataFrame
    """
    if not location:
        raise ValueError("Location is required for streaming.")
    self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
    try:
        if options is None:
            options = {}
        reader = self._get_stream_reader()
        reader.format(format)
        reader.option("rescuedDataColumn", "_rescued_data")
        if schema is None:
            options["inferSchema"] = True
        else:
            reader.schema(schema)
        reader.options(**options)
        df = reader.load(location)
        if add_metadata_column:
            df = self._add_metadata_column(df)
    except Exception as e:
        self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
        raise
    else:
        self._console_logger.info(f"Successfully read files from [ '{location}' ]")
        return df