Dataset Viewer
Auto-converted to Parquet Duplicate
id
int64
11
59.9k
original
stringlengths
33
150k
modified
stringlengths
37
150k
45,191
def test_drop_duplicates(): frame_data = { "A": list(range(3)) * 2, "B": list(range(1, 4)) * 2, "C": list(range(6)), } modin_df = pd.DataFrame(frame_data) pandas_df = pandas.DataFrame(frame_data) # noqa F841 df_equals( modin_df.drop_duplicates(subset=["A", "B"], kee...
def test_drop_duplicates(): frame_data = { "A": list(range(3)) * 2, "B": list(range(1, 4)) * 2, "C": list(range(6)), } modin_df = pd.DataFrame(frame_data) pandas_df = pandas.DataFrame(frame_data) df_equals( modin_df.drop_duplicates(subset=["A", "B"], keep="first", in...
9,363
def test_wrap_var_set():
    """Wrapping a set must not wrap the container itself, only its members."""
    wrapped = wrap_var(set(['foo']))
    assert not isinstance(wrapped, AnsibleUnsafe)
    for element in wrapped:
        assert isinstance(element, AnsibleUnsafe)
def test_wrap_var_set():
    """Wrapping a set keeps it a set while marking every member unsafe."""
    wrapped = wrap_var(set(['foo']))
    assert isinstance(wrapped, set)
    for element in wrapped:
        assert isinstance(element, AnsibleUnsafe)
24,704
def _declare_qos_parameteres( entity_type: Union[Type[Publisher], Type[Subscription]], node: 'Node', topic_name: Text, qos: QoSProfile, options: QoSOverridingOptions ) -> QoSProfile: """ Declare qos parameters for a Publisher or a Subscription. :param entity_type: Either `rclpy.node.Pub...
def _declare_qos_parameters( entity_type: Union[Type[Publisher], Type[Subscription]], node: 'Node', topic_name: Text, qos: QoSProfile, options: QoSOverridingOptions ) -> QoSProfile: """ Declare qos parameters for a Publisher or a Subscription. :param entity_type: Either `rclpy.node.Publ...
3,123
def test_win_type_freq_return_deprecation():
    """Accessing ``win_type`` on a frequency-based rolling window emits FutureWarning."""
    ser = Series(range(2), index=date_range("2020", periods=2))
    roller = ser.rolling("2s")
    with tm.assert_produces_warning(FutureWarning):
        roller.win_type
def test_win_type_freq_return_deprecation():
    """A frequency-based rolling window reports win_type == "freq" under a FutureWarning."""
    ser = Series(range(2), index=date_range("2020", periods=2))
    roller = ser.rolling("2s")
    with tm.assert_produces_warning(FutureWarning):
        assert roller.win_type == "freq"
19,831
def populate_counts(sf, schema, objs_cached, logger): objects_to_count = [objname for objname in objs_cached] counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count) errors = transports_errors + salesforce_errors for error in errors[0:10]: logger.warning(f"Error count...
def populate_counts(sf, schema, objs_cached, logger): objects_to_count = [objname for objname in objs_cached] counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count) errors = transports_errors + salesforce_errors for error in errors[0:10]: logger.warning(f"Error count...
31,982
def main(): install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging) option = option_handler() packs_artifacts_path = option.packs_artifacts_path id_set_path = option.id_set_path extract_destination_path = option.extract_path storage_bucket_name = option.bucket_name service_a...
def main(): install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging) option = option_handler() packs_artifacts_path = option.packs_artifacts_path id_set_path = option.id_set_path extract_destination_path = option.extract_path storage_bucket_name = option.bucket_name service_a...
8,665
def configure(config):
    """Interactive configuration for the currency plugin.

    Defines the ``currency`` config section and prompts the operator for its
    settings.
    """
    config.define_section('currency', CurrencySection, validate=False)
    config.currency.configure_setting('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:')
    # Fixed prompt text: this is a yes/no question, so capitalize it and end
    # with a question mark instead of a lowercase colon-terminated fragment.
    config.currency.configure_setting('enable_regex', 'Automatically respond to regex matches?')
def configure(config):
    """Interactive configuration for the currency plugin."""
    config.define_section('currency', CurrencySection, validate=False)
    # Prompt the operator for each setting in turn.
    prompts = [
        ('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:'),
        ('enable_regex', 'Automatically respond to regex matches?'),
    ]
    for setting_name, prompt_text in prompts:
        config.currency.configure_setting(setting_name, prompt_text)
20,224
def process_missing(missing_ids): """Create missing school and alias objects and dump csv of additions. """ csv_out_data = [] csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory, datetime.date.today()) missing_data = process_datafiles(add_s...
def process_missing(missing_ids): """Create missing school and alias objects and dump csv of additions.""" csv_out_data = [] csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory, datetime.date.today()) missing_data = process_datafiles(add_scho...
30,938
def write_data(sheet, data_item, data_headers, workbook, bold, border): if not isinstance(data_item, list): data_item = [data_item] if not data_headers: data_headers = list(data_item[0].keys()) worksheet = workbook.add_worksheet(sheet) row = 0 col = 0 for key in data_headers: ...
def write_data(sheet, data_item, data_headers, workbook, bold, border): if not isinstance(data_item, list): data_item = [data_item] if not data_headers: data_headers = list(data_item[0].keys()) worksheet = workbook.add_worksheet(sheet) row = 0 col = 0 for key in data_headers: ...
31,006
def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]: """ Get a packs dir names from a contribution pull request changed files Args: branch: The contrib branch pr_number: The contrib PR repo: The contrib repo Returns: A list of packs dir names, if found....
def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]: """ Get packs dir names from a contribution pull request changed files Args: branch: The contrib branch pr_number: The contrib PR repo: The contrib repo Returns: A list of packs dir names, if found. ...
44,408
def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array: """ Converts the configuration σ to a 64-bit integer labelling the Hilbert Space. .. Note:: Requires jax >= 0.3.17 and will crash on older versions. Args: hilbert: The Hilbert space σ: A single or a batch of ...
def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array: """ Converts the configuration σ to a 64-bit integer labelling the Hilbert Space. .. Note:: Requires jax >= 0.3.17 and will raise an exception on older versions. Args: hilbert: The Hilbert space σ: A single o...
6,606
def get_or_make_bin(item_code, warehouse): bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse}) if not bin_record: bin_obj = frappe.get_doc({ "doctype": "Bin", "item_code": item_code, "warehouse": warehouse, }) bin_obj.flags.ignore_permissions = 1 bin_obj.insert(...
def get_or_make_bin(item_code, warehouse) -> str: bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse}) if not bin_record: bin_obj = frappe.get_doc({ "doctype": "Bin", "item_code": item_code, "warehouse": warehouse, }) bin_obj.flags.ignore_permissions = 1 bin_obj....
13,911
def _find_excluded_ranges( lines: List[Tuple[int, str]], *, warnings: _ExclusionRangeWarnings, exclude_lines_by_pattern: Optional[str] = None, exclude_branches_by_pattern: Optional[str] = None, exclude_pattern_prefix: str, ) -> Callable[[int], bool]: """ Scan through all lines to find li...
def _find_excluded_ranges( lines: List[Tuple[int, str]], *, warnings: _ExclusionRangeWarnings, exclude_lines_by_pattern: Optional[str] = None, exclude_branches_by_pattern: Optional[str] = None, exclude_pattern_prefix: str, ) -> Callable[[int], bool]: """ Scan through all lines to find li...
5,578
def parse_metar(metar_text, year, month, station_metadata=station_info): """Parse a METAR report in text form into a list of named tuples. Parameters ---------- metar_text : str The METAR report station_metadata : dict Mapping of station identifiers to station metadata year : in...
def parse_metar(metar_text, year, month, station_metadata=station_info): """Parse a METAR report in text form into a list of named tuples. Parameters ---------- metar_text : str The METAR report station_metadata : dict Mapping of station identifiers to station metadata year : in...
42,005
def _run_iteration( zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0 ) -> Tuple[Dict[complex, Union[int, float]], float]: max_fractional_delta = 0.0 for coord in coordinates: current_val = zmap.get(coord, None) max_neighbor = -np.inf min_ne...
def _run_iteration( zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0 ) -> Tuple[Dict[complex, Union[int, float]], float]: max_fractional_delta = 0.0 for coord in coordinates: current_val = zmap.get(coord, None) max_neighbor = -np.inf min_ne...
38,902
def field_singleton_schema( # noqa: C901 (ignore complexity) field: Field, *, by_alias: bool, model_name_map: Dict[Type['BaseModel'], str], schema_overrides: bool = False, ref_prefix: Optional[str] = None, known_models: Set[Type['BaseModel']], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: ...
def field_singleton_schema( # noqa: C901 (ignore complexity) field: Field, *, by_alias: bool, model_name_map: Dict[Type['BaseModel'], str], schema_overrides: bool = False, ref_prefix: Optional[str] = None, known_models: Set[Type['BaseModel']], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: ...
44,177
def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
    """Build an ExpvalCost while catching its initial deprecation warning."""
    with pytest.warns(UserWarning, match="will be deprecated,"):
        cost_fn = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
    return cost_fn
def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
    """Build an ExpvalCost while catching its initial deprecation warning."""
    with pytest.warns(UserWarning, match="is deprecated,"):
        cost_fn = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
    return cost_fn
4,560
def clean(signals, sessions=None, detrend=True, standardize='zscore', confounds=None, standardize_confounds=True, filter="butterworth", low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False): """Improve SNR on masked fMRI signals. This function can do several things on the input signa...
def clean(signals, sessions=None, detrend=True, standardize='zscore', confounds=None, standardize_confounds=True, filter='butterworth', low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False): """Improve SNR on masked fMRI signals. This function can do several things on the input signa...
14,255
def get_sim_steps( time: Union[Real, Decimal], units: str = "step", round_mode: str = "error" ) -> int: """Calculates the number of simulation time steps for a given amount of *time*. Args: time: The value to convert to simulation time steps. units: String specifying the units of th...
def get_sim_steps( time: Union[Real, Decimal], units: str = "step", round_mode: str = "error" ) -> int: """Calculates the number of simulation time steps for a given amount of *time*. Args: time: The value to convert to simulation time steps. units: String specifying the units of th...
14,125
def _continuous_to_discrete_coords(total_bounds, bounds, p): """ Calculates mid points & ranges of geoms and returns as discrete coords Parameters ---------- total_bounds : Total bounds of geometries - array bounds : Bounds of each geometry - array p : The number of iterations used ...
def _continuous_to_discrete_coords(total_bounds, bounds, p): """ Calculates mid points & ranges of geoms and returns as discrete coords Parameters ---------- total_bounds : Total bounds of geometries - array bounds : Bounds of each geometry - array p : The number of iterations used ...
6,585
def execute():
    """Warn operators that the e-invoicing integration moved to a separate app."""
    notice = (
        "E-Invoicing Integration is moved to a separate app and will be removed from ERPNext in version-14.\n"
        "Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance"
    )
    click.secho(notice, fg="yellow")
def execute():
    """Warn operators that the Indian e-invoicing integration moved to a separate app."""
    notice = (
        "Indian E-Invoicing integration is moved to a separate app and will be removed from ERPNext in version-14.\n"
        "Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance"
    )
    click.secho(notice, fg="yellow")
20,458
def merge_stock_location_path_stock_rule(env): openupgrade.logged_query( env.cr, """ INSERT INTO stock_rule (name, active, action, sequence, company_id, location_id, location_src_id, route_id, procure_method, route_sequence, picking_type_id, delay, propagate, warehouse_id, ...
def merge_stock_location_path_stock_rule(env): openupgrade.logged_query( env.cr, """ INSERT INTO stock_rule (name, active, action, sequence, company_id, location_id, location_src_id, route_id, procure_method, route_sequence, picking_type_id, delay, propagate, warehouse_id, ...
31,722
def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse: """ get-remote-data command: Returns an updated incident and entries If offense's events were updated in the long running container, update the demisto incident. Args: client (Client): QRad...
def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse: """ get-remote-data command: Returns an updated incident and entries If offense's events were updated in the long running container, update the demisto incident. Args: client (Client): QRad...
6,077
def matchQueue(jobJDL, queueDict, fullMatch=False): """ Match the job description to the queue definition :param str job: JDL job description :param bool fullMatch: test matching on all the criteria :param dict queueDict: queue parameters dictionary :return: S_OK/S_ERROR, Value - result of matching, S_OK ...
def matchQueue(jobJDL, queueDict, fullMatch=False): """ Match the job description to the queue definition :param str job: JDL job description :param bool fullMatch: test matching on all the criteria :param dict queueDict: queue parameters dictionary :return: S_OK/S_ERROR, Value - result of matching, S_OK ...
25,968
def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None, sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None, location_mode=None): logger.debug('Getting data service client service_type=...
def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None, sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None, location_mode=None): logger.debug('Getting data service client service_type=...
54,216
def group_settings_greedy(settings: Iterable[InitObsSetting]) \ -> Dict[InitObsSetting, List[InitObsSetting]]: """ Group a list of settings which can be simultaneously measured via a greedy algorithm. We construct a dictionary keyed by `max_setting` (see docstrings for `_max_weight_state` a...
def group_settings_greedy(settings: Iterable[InitObsSetting]) \ -> Dict[InitObsSetting, List[InitObsSetting]]: """ Group a list of settings which can be simultaneously measured via a greedy algorithm. We construct a dictionary keyed by `max_setting` (see docstrings for `_max_weight_state` a...
20,273
def unholder(item):
    """Get the held item of an object holder or a list of object holders.

    Objects without a ``held_object`` attribute are returned unchanged; for a
    list input, each element is unwrapped independently.
    """
    # Fixed docstring typos: "itme" -> "item", "of list" -> "or a list",
    # "holers" -> "holders". Code behavior is unchanged.
    if isinstance(item, list):
        return [i.held_object if hasattr(i, 'held_object') else i for i in item]
    if hasattr(item, 'held_object'):
        return item.held_object
    return item
def unholder(item):
    """Get the held item of an object holder or list of object holders."""
    def _unwrap(obj):
        # Return the held object when present, otherwise the object itself.
        return getattr(obj, 'held_object', obj)

    if isinstance(item, list):
        return [_unwrap(entry) for entry in item]
    return _unwrap(item)
40,426
def test_graph_store_conversion(): graph_store = MyGraphStore() edge_index = get_edge_index(100, 100, 300) edge_index = sort_edge_index(edge_index, sort_by_row=False) adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) coo = (edge_index[0], edge_index[1]) csr = adj.csr()[:2]...
def test_graph_store_conversion(): graph_store = MyGraphStore() edge_index = get_edge_index(100, 100, 300) edge_index = sort_edge_index(edge_index, sort_by_row=False) adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) coo = (edge_index[0], edge_index[1]) csr = adj.csr()[:2]...
58,329
def rk4(f, x, t, dt, stages=4, s=0.0): """Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers. The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0 convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama schemefor SDEs (s > 0.0) with stron...
def rk4(f, x, t, dt, stages=4, s=0.0): """Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers. The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0 convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama schemefor SDEs (s > 0.0) with stron...
53,266
def boris_push_relativistic(x, v, B, E, q, m, dt): r""" The explicit Boris pusher, including realtivistic corrections. Parameters ---------- x : np.ndarray particle position at full timestep, in SI (meter) units. v : np.ndarray particle velocity at half timestep, in SI (meter/se...
def boris_push_relativistic(x, v, B, E, q, m, dt): r""" The explicit Boris pusher, including realtivistic corrections. Parameters ---------- x : np.ndarray particle position at full timestep, in SI (meter) units. v : np.ndarray particle velocity at half timestep, in SI (meter/se...
1,217
def needs_nibabel_data(subdir=None): """ Decorator for tests needing nibabel-data Parameters ---------- subdir : None or str Subdirectory we need in nibabel-data directory. If None, only require nibabel-data directory itself. Returns ------- skip_dec : decorator De...
def needs_nibabel_data(subdir=None): """ Decorator for tests needing nibabel-data Parameters ---------- subdir : None or str Subdirectory we need in nibabel-data directory. If None, only require nibabel-data directory itself. Returns ------- skip_dec : decorator De...
57,843
def main() -> None: try: arguments = demisto.args() api_key = demisto.params().get('apikey') base_url = urljoin(demisto.params()['url'], '/api/') verify_certificate = not demisto.params().get('insecure', False) first_fetch_time = arg_to_timestamp( arg=demisto.para...
def main() -> None: try: arguments = demisto.args() api_key = demisto.params().get('apikey') base_url = urljoin(demisto.params()['url'], '/api/') verify_certificate = not demisto.params().get('insecure', False) first_fetch_time = arg_to_timestamp( arg=demisto.para...
57,765
def test_module(client: Client) -> str: """Tests API connectivity and authentication' Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Raises exceptions if something goes wrong. :type client: ``Client`` :param Client: GreatHor...
def test_module(client: Client) -> str: """Tests API connectivity and authentication' Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Raises exceptions if something goes wrong. :type client: ``Client`` :param Client: GreatHor...
31,228
def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """Get Connector Runs command. Args: client: Client which connects to api Returns: Human Readable Entry Context Raw Data """ connector_id = demisto.getArg("connector_id") ...
def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """Get Connector Runs command. Args: client: Client which connects to api Returns: Human Readable Entry Context Raw Data """ connector_id = str(args.get("connector_id")) ...
31,366
def is_there_private_packs_to_upload(public_index_json, private_index_path): """ Checks if there are private packs that should be uploaded. The check compares the private index with the public one to verify if Content commit hash of each private pack in those files (private and public index files) are equal...
def is_there_private_packs_to_upload(public_index_json, private_index_path): """ Checks if there are private packs that should be uploaded. The check compares the private index with the public one to verify if Content commit hash of each private pack in those files (private and public index files) are equal...
5,862
def _dirstats_preprocessing(samples, normalize, axis): """ Preprocessing of input for directional stats functions. Performs input validation and if necesssary normalization. Used by directionalvar and directionalmean. Parameters ---------- samples : array Input array. Must be at lea...
def _dirstats_preprocessing(samples, normalize, axis): """ Preprocessing of input for directional stats functions. Performs input validation and if necesssary normalization. Used by directionalvar and directionalmean. Parameters ---------- samples : array Input array. Must be at lea...
42,827
def backup_packages(backup_path, dry_run: bool = False, skip=False): """ Creates `packages` directory and places install list text files there. """ def run_cmd_if_no_dry_run(command, dest, dry_run) -> int: if dry_run: print_dry_run_copy_info(f"$ {command}", dest) # Return -1 for any processes depending on c...
def backup_packages(backup_path, dry_run: bool = False, skip=False): """ Creates `packages` directory and places install list text files there. """ def run_cmd_if_no_dry_run(command, dest, dry_run) -> int: if dry_run: print_dry_run_copy_info(f"$ {command}", dest) # Return -1 for any processes depending on c...
38,427
def register_keys(web3: Web3, keys: Optional[list]):
    """Register each private key with the given web3 instance.

    A ``keys`` value of None is treated as an empty list.
    """
    # Simplified: the original defined a throwaway nested helper on every
    # call just to map None -> []; `keys or []` expresses that directly.
    for key in keys or []:
        register_key(web3, key)
def register_keys(web3: Web3, keys: Optional[list]):
    """Register every supplied key with the web3 instance; None means no keys."""
    if not keys:
        return
    for key in keys:
        register_key(web3, key)
39,301
def vtk_points(points, deep=True): """Convert numpy or list of points to a vtkPoints object.""" if not isinstance(points, np.ndarray): points = np.array(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') # che...
def vtk_points(points, deep=True): """Convert numpy array or array-like to a vtkPoints object.""" if not isinstance(points, np.ndarray): points = np.array(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') # c...
End of preview. Expand in Data Studio

Splits: 80% train, 10% validation, 10% test.


📦 Method-Level Change / Code Review Suggestion Dataset 📝 Overview This dataset is designed for training or fine-tuning large language models (LLMs) on the task of automated code suggestion generation at the method level. Each entry in the dataset contains: An original Python method extracted from a GitHub pull request A revised version of the same method, incorporating code review suggestions

🎯 Purpose To enable models to learn fine-grained, real-world code changes suggested during pull request reviews. Ideal for:

  • Method-level code generation
  • Code completion
  • Refactoring suggestions
  • Review automation

🔍 Source Mined from public GitHub repositories using GraphQL and REST APIs. Pull request review suggestions were extracted and aligned with method-level changes. For more on how suggestions work in GitHub PRs, see: Incorporating Feedback in Your Pull Request https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/incorporating-feedback-in-your-pull-request

Downloads last month
15