query
stringlengths
8
6.75k
document
stringlengths
9
1.89M
negatives
listlengths
19
19
metadata
dict
NewOffChainReportingSpec initializes a new OffChainReportingSpec from a job.OCROracleSpec
func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec { return &OffChainReportingSpec{ ContractAddress: spec.ContractAddress, P2PBootstrapPeers: spec.P2PBootstrapPeers, P2PV2Bootstrappers: spec.P2PV2Bootstrappers, ...
[ "func NewOffChainReportingSpec(spec *job.OffchainReportingOracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PPeerID: spec.P2PPeerID,\n\t\tP2PBootstrapPeers: spec.P2PBootst...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewOffChainReporting2Spec initializes a new OffChainReportingSpec from a job.OCR2OracleSpec
func NewOffChainReporting2Spec(spec *job.OCR2OracleSpec) *OffChainReporting2Spec { return &OffChainReporting2Spec{ ContractID: spec.ContractID, Relay: spec.Relay, RelayConfig: spec.RelayConfig, P2PV2Bootstrappers: spec.P2PV...
[ "func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PBootstrapPeers: spec.P2PBootstrapPeers,\n\t\tP2PV2Bootstrappers: spec.P2PV2B...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewPipelineSpec generates a new PipelineSpec from a pipeline.Spec
func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec { return PipelineSpec{ ID: spec.ID, JobID: spec.JobID, DotDAGSource: spec.DotDagSource, } }
[ "func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec {\n\treturn PipelineSpec{\n\t\tID: spec.ID,\n\t\tDotDAGSource: spec.DotDagSource,\n\t}\n}", "func (in *PipelineSpec) DeepCopy() *PipelineSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PipelineSpec)\n\tin.DeepCopyInto(out)\n\treturn out...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewKeeperSpec generates a new KeeperSpec from a job.KeeperSpec
func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec { return &KeeperSpec{ ContractAddress: spec.ContractAddress, FromAddress: spec.FromAddress, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, EVMChainID: spec.EVMChainID, } }
[ "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewKeeper(am sdk.AccountMapper) Keeper {\n\treturn Keeper{a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewWebhookSpec generates a new WebhookSpec from a job.WebhookSpec
func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec { return &WebhookSpec{ CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, } }
[ "func (in *GitHubWebhookSpec) DeepCopy() *GitHubWebhookSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(GitHubWebhookSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewWebhook(log logr.Logger) Webhook {\n\tac := serializer.NewCodecFactory(runtime.NewScheme())\n\td := ac.UniversalDeserializer()\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewCronSpec generates a new CronSpec from a job.CronSpec
func NewCronSpec(spec *job.CronSpec) *CronSpec { return &CronSpec{ CronSchedule: spec.CronSchedule, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, } }
[ "func NewCronFromJobSpec(\n\tjobSpec job.Job,\n\tpipelineRunner pipeline.Runner,\n) (*Cron, error) {\n\n\tcronSpec := jobSpec.CronSpec\n\tspec := jobSpec.PipelineSpec\n\n\tcronLogger := logger.CreateLogger(\n\t\tlogger.Default.With(\n\t\t\t\"jobID\", jobSpec.ID,\n\t\t\t\"schedule\", cronSpec.CronSchedule,\n\t\t),\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewBlockhashStoreSpec creates a new BlockhashStoreSpec for the given parameters.
func NewBlockhashStoreSpec(spec *job.BlockhashStoreSpec) *BlockhashStoreSpec { return &BlockhashStoreSpec{ CoordinatorV1Address: spec.CoordinatorV1Address, CoordinatorV2Address: spec.CoordinatorV2Address, CoordinatorV2PlusAddress: spec.CoordinatorV2PlusAddress, WaitBlocks: ...
[ "func newBlockStore(id string, conf *Conf, indexConfig *IndexConfig,\n\tdbHandle *leveldbhelper.DBHandle, stats *stats) (*BlockStore, error) {\n\tfileMgr, err := newBlockfileMgr(id, conf, indexConfig, dbHandle)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create ledgerStats and initialize blockchain_height ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewBlockHeaderFeederSpec creates a new BlockHeaderFeederSpec for the given parameters.
func NewBlockHeaderFeederSpec(spec *job.BlockHeaderFeederSpec) *BlockHeaderFeederSpec { return &BlockHeaderFeederSpec{ CoordinatorV1Address: spec.CoordinatorV1Address, CoordinatorV2Address: spec.CoordinatorV2Address, CoordinatorV2PlusAddress: spec.CoordinatorV2PlusAddress, WaitBlocks: ...
[ "func (_Rootchain *RootchainFilterer) FilterNewHeaderBlock(opts *bind.FilterOpts, proposer []common.Address, headerBlockId []*big.Int, reward []*big.Int) (*RootchainNewHeaderBlockIterator, error) {\n\n\tvar proposerRule []interface{}\n\tfor _, proposerItem := range proposer {\n\t\tproposerRule = append(proposerRule...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewBootstrapSpec initializes a new BootstrapSpec from a job.BootstrapSpec
func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec { return &BootstrapSpec{ ContractID: spec.ContractID, Relay: spec.Relay, RelayConfig: spec.RelayConfig, BlockchainTimeout: spec.BlockchainTimeout, ContractCon...
[ "func newBootstrapTemplate(e2eCtx *E2EContext) *cfn_bootstrap.Template {\n\tBy(\"Creating a bootstrap AWSIAMConfiguration\")\n\tt := cfn_bootstrap.NewTemplate()\n\tt.Spec.BootstrapUser.Enable = true\n\tt.Spec.SecureSecretsBackends = []v1alpha3.SecretBackend{\n\t\tv1alpha3.SecretBackendSecretsManager,\n\t\tv1alpha3....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewJobResource initializes a new JSONAPI job resource
func NewJobResource(j job.Job) *JobResource { resource := &JobResource{ JAID: NewJAIDInt32(j.ID), Name: j.Name.ValueOrZero(), Type: JobSpecType(j.Type), SchemaVersion: j.SchemaVersion, GasLimit: j.GasLimit, ForwardingAllowed: j.ForwardingAllowed, MaxTas...
[ "func (c *Controller) NewJob() (*Job, error) {\n var job Job\n if err := job.Init(strconv.Itoa(c.NextJobId), c.NamePrefix); err != nil {\n return nil, err\n }\n\n c.NextJobId += 1\n return &job, nil\n}", "func (a *API) newJob(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t// load the f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithGroupIDConsumerOption provides an option to modify the GroupID for a consumer Group
func WithGroupIDConsumerOption(groupID string) ConsumerOption { return func(c *Consumer) { c.config.GroupID = groupID } }
[ "func WithConsumerGroupID(groupID string) ConfigOpt {\n\treturn func(c *kafkalib.ConfigMap) {\n\t\t_ = c.SetKey(\"group.id\", groupID)\n\t}\n}", "func ConsumerSetGroupID(groupID string) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerGroupID = groupID })\n}", "func DeliverGroup(g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithTopicConsumerOption provides an option to modify the topic on which the Consumer will listen to
func WithTopicConsumerOption(topic string) ConsumerOption { return func(c *Consumer) { c.config.Topic = topic } }
[ "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func (c *Consumer) SetTopic(topic string) *Consumer {\n\tif topic != \"\" {\n\t\tc.mutex.Lock()\n\t\tc.bind.SetKey(topic)\n\t\tc.mutex.Unlock()\n\t\tc.SetQueueName(true, \"\")\n\t\tc.SetChannelKey(true, \"\")\n\t}\n\tret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithMaxMinByteConsumerOption provides an option to modify the min/max byte that can written to kafka
func WithMaxMinByteConsumerOption(min, max int) ConsumerOption { return func(c *Consumer) { c.config.MinBytes = min c.config.MaxBytes = max } }
[ "func (o *consumer) setMaxPendingBytes(limit int) {\n\to.pblimit = limit\n\to.maxpb = limit / 16\n\tif o.maxpb == 0 {\n\t\to.maxpb = 1\n\t}\n}", "func MaxRequestMaxBytes(max int) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestMaxBytes = max\n\t\treturn nil\n\t}\n}", "func WithMa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithAutoCommitConsumerOption sets the autocommit property of consumer
func WithAutoCommitConsumerOption(flag bool) ConsumerOption { return func(c *Consumer) { c.autocommit = flag } }
[ "func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption {\n\treturn func(c *Consumer) { c.config.CommitInterval = dur }\n}", "func AutoAck() ConsumerOpt {\n\treturn func(c *Consumer) {\n\t\tc.autoAck = true\n\t}\n}", "func AutoCommitInterval(interval time.Duration) GroupOpt {\n\treturn groupOpt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithAutoCommitTimeConsumerOption sets the auto commit time for Consumer
func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption { return func(c *Consumer) { c.config.CommitInterval = dur } }
[ "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func AutoCommitInterval(interval time.Duration) GroupOpt {\n\treturn groupOpt{func(cfg *groupConsumer) { cfg.autocommitInterval = interval }}\n}", "func AutoAck() ConsumerOpt {\n\treturn func...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithDecoderConsumerOption sets the decoder for the Consumer Message
func WithDecoderConsumerOption(fn Decoder) ConsumerOption { return func(c *Consumer) { c.dec = fn } }
[ "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func WithDecoder(key string, dec func(body io.ReadCloser) (io.ReadCloser, error)) ToServerOption {\n\treturn func(opts *toServerOptions) {\n\t\tif opts.decoders == nil {\n\t\t\topts.decode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithBeforeFuncsConsumerOption provides a way to set BeforeFunc(s) to the consumer
func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption { return func(c *Consumer) { c.befores = append(c.befores, fns...) } }
[ "func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.afters = append(c.afters, fns...) }\n}", "func (c *callback) AddBeforeInvoke(conf rpc.CallbackFunc) {\n\tf, ok := beforeInvokeRegistry[conf.Name]\n\tif !ok {\n\t\tlog.DefaultLogger.Errorf(\"[runtime][rpc]can't fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithAfterFuncsConsumerOption provides a way to set AfterFunc(s) to the consumer
func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption { return func(c *Consumer) { c.afters = append(c.afters, fns...) } }
[ "func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.befores = append(c.befores, fns...) }\n}", "func (response *Response) After(fn func()) {\n\tresponse.afterFuncs = append(response.afterFuncs, fn)\n}", "func After(routines []func(), callback func()) {\n\tvar ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithEndpointConsumerOption provides a way to set endpoint to the consumer
func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption { return func(c *Consumer) { c.end = end } }
[ "func WithEndpoint(endpoint string) Option {\n\treturn func(cfg *config) {\n\t\tcfg.collectorEndpoint = endpoint\n\t}\n}", "func WithEndpoint(endpoint string) Opt {\n\treturn func(c *Client) {\n\t\tc.endpoint = endpoint\n\t}\n}", "func WithEndpoint(endpoint string) Option {\n\treturn func(a *Client) error {\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithReaderConsumerOption lets you set the reader for kafka
func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption { return func(c *Consumer) { c.reader = reader } }
[ "func ConsumerReader(r StreamReader) ConsumerOptionsFn {\n\treturn func(o *Consumer) error {\n\t\to.reader = r\n\t\treturn nil\n\t}\n}", "func WithReader(r reader.Reader) Option {\n\treturn func(c *client) error {\n\t\tif r != nil {\n\t\t\tc.reader = r\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithReader(r reader....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithOffsetConsumerOption lets you set the kafka offset to read from
func WithOffsetConsumerOption(offset int64) ConsumerOption { return func(c *Consumer) { switch offset { case LastOffset: c.config.StartOffset = LastOffset case FirstOffset: c.config.StartOffset = FirstOffset default: c.config.StartOffset = FirstOffset } } }
[ "func WithOffset(offset int) eventsource.QueryOption {\n\treturn func(i interface{}) {\n\t\tif o, ok := i.(*options); ok {\n\t\t\to.offset = &offset\n\t\t}\n\t}\n}", "func ReadOffset(o uint) ReadOption {\n\treturn func(r *ReadOptions) {\n\t\tr.Offset = o\n\t}\n}", "func WithReaderConsumerOption(reader *kafgo.Re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewConsumer returns kafka consumer for the given brokers
func NewConsumer( brokers []string, logger log.Logger, options ...ConsumerOption, ) (*Consumer, error) { // default values cfg := kafgo.ReaderConfig{ Brokers: brokers, GroupID: defaultConsumerGroupID, Topic: defaultTopic, Logger: kafka.LoggerFunc(logger.Debugf), } cs := &Consumer{ reader: nil, co...
[ "func NewConsumer(topic, groupID string, brokers []string) *Consumer {\n\tnewConsumer := kafka.NewReader(kafka.ReaderConfig{\n\t\tBrokers: brokers,\n\t\tGroupID: groupID,\n\t\tTopic: topic,\n\t\tQueueCapacity: 1,\n\t\tStartOffset: kafka.FirstOffset,\n\t})\n\n\treturn &Consumer{\n\t\tconsumer: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InnerJoin selects records that have matching values in both tables. left datatable is used as reference datatable. InnerJoin transforms an expr column to a raw column
func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute() }
[ "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func TestDb_InnerJoin(t *testing.T) {\n\tsql := engine...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InnerJoin selects records that have matching values in both tables. tables[0] is used as reference datatable.
func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(innerJoin, tables, on).Compute() }
[ "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func TestDb_InnerJoin(t *testing.T) {\n\tsql := engine.Select(\"a.runoob_id\", \"a.runoob_author\", \"b.runoob_count\").\n\t\tFrom(\" runoob_tbl a\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
LeftJoin the tables. tables[0] is used as reference datatable.
func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(leftJoin, tables, on).Compute() }
[ "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(bod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RightJoin the tables. tables[0] is used as reference datatable.
func RightJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(rightJoin, tables, on).Compute() }
[ "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func (w *Wrapper) RightJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"RIGHT JOIN\", condition)\n\treturn w\n}", "func (dba *Sqler) RightJoin(args ...interfac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
OuterJoin the tables. tables[0] is used as reference datatable.
func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(outerJoin, tables, on).Compute() }
[ "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (s *Selec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetOrders retrieves paginated orders from the Mesh DB at a specific snapshot in time. Passing an empty string as `snapshotID` creates a new snapshot and returns the first set of results. To fetch all orders, continue to make requests supplying the `snapshotID` returned from the first request. After 1 minute of not rece...
func (app *App) GetOrders(page, perPage int, snapshotID string) (*rpc.GetOrdersResponse, error) { ordersInfos := []*zeroex.AcceptedOrderInfo{} if perPage <= 0 { return &rpc.GetOrdersResponse{ OrdersInfos: ordersInfos, SnapshotID: snapshotID, }, nil } var snapshot *db.Snapshot if snapshotID == "" { //...
[ "func (handler *rpcHandler) GetOrders(page, perPage int, snapshotID string) (result *types.GetOrdersResponse, err error) {\n\tlog.WithFields(map[string]interface{}{\n\t\t\"page\": page,\n\t\t\"perPage\": perPage,\n\t\t\"snapshotID\": snapshotID,\n\t}).Debug(\"received GetOrders request via RPC\")\n\t// Cat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AddOrders can be used to add orders to Mesh. It validates the given orders and if they are valid, will store and eventually broadcast the orders to peers.
func (app *App) AddOrders(signedOrdersRaw []*json.RawMessage) (*zeroex.ValidationResults, error) { allValidationResults := &zeroex.ValidationResults{ Accepted: []*zeroex.AcceptedOrderInfo{}, Rejected: []*zeroex.RejectedOrderInfo{}, } schemaValidOrders := []*zeroex.SignedOrder{} for _, signedOrderRaw := range si...
[ "func (handler *rpcHandler) AddOrders(orders []*zeroex.SignedOrder) (*zeroex.ValidationResults, error) {\n\tlog.Debug(\"received AddOrders request via RPC\")\n\tvalidationResults, err := handler.app.AddOrders(orders)\n\tif err != nil {\n\t\t// We don't want to leak internal error details to the RPC client.\n\t\tlog...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AddPeer can be used to manually connect to a new peer.
func (app *App) AddPeer(peerInfo peerstore.PeerInfo) error { ctx, cancel := context.WithTimeout(context.Background(), peerConnectTimeout) defer cancel() return app.node.Connect(ctx, peerInfo) }
[ "func (r *Reactor) AddPeer(peer p2p.Peer) {}", "func AddPeer(w http.ResponseWriter, r *http.Request) {\n\t// Connect to the peer\n\tvar newPeers peerStr\n\n\terr := json.NewDecoder(r.Body).Decode(&newPeers)\n\tif err != nil {\n\t\tlog.Println(\"AddPeer: could not decode peer\")\n\t}\n\tlog.Println(newPeers)\n\tlo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SubscribeToOrderEvents let's one subscribe to order events emitted by the OrderWatcher
func (app *App) SubscribeToOrderEvents(sink chan<- []*zeroex.OrderEvent) event.Subscription { subscription := app.orderWatcher.Subscribe(sink) return subscription }
[ "func (handler *rpcHandler) SubscribeToOrders(ctx context.Context) (*ethRpc.Subscription, error) {\n\tlog.Debug(\"received order event subscription request via RPC\")\n\tsubscription, err := SetupOrderStream(ctx, handler.app)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in `m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ServeHTTP populates the status page template with data and serves it when there is a request.
func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if s.Authenticator != nil { _, err := s.Authenticator.Authenticate(r.Context(), w, r) if errors.Is(err, oidc.ErrRedirectRequired) { return } if err != nil { http.Error(w, "Error: Authentication failed", http.StatusInternalSer...
[ "func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Applier status request\", \"time\", s.Clock.Now().String())\n\tif s.Template == nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request fail...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ServeHTTP handles requests for forcing a run by attempting to add to the runQueue, and writes a response including the result and a relevant message.
func (f *ForceRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { log.Logger("webserver").Info("Force run requested") var data struct { Result string `json:"result"` Message string `json:"message"` } switch r.Method { case "POST": var ( userEmail string err error ) if f.Authenti...
[ "func (f *ForceRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Force run requested\")\n\tvar data struct {\n\t\tResult string `json:\"result\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tselect {\n\t\tcase f.RunQueue <- true:\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shutdown gracefully shuts the webserver down.
func (ws *WebServer) Shutdown() error { err := ws.server.Shutdown(context.Background()) ws.server = nil return err }
[ "func (l *Launcher) Shutdown(ctxt context.Context) error {\n\t// send Browser.close() directly to devtools URL\n\tif l.devtoolsURL != \"\" {\n\t\tconn, err := client.Dial(l.devtoolsURL)\n\t\tif err == nil {\n\t\t\t_ = conn.Write(shutdownMsg)\n\t\t}\n\t}\n\n\t// osx applications do not automatically exit when all wi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithHTTPTimeout returns an HTTPCheckerOption that specifies the timeout for HTTP requests. Setting a timeout is highly recommended, but it needs to be carefully chosen to avoid false results.
func WithHTTPTimeout(timeout time.Duration) HTTPCheckerOption { return func(c *HTTPChecker) { c.timeout = timeout } }
[ "func WithHTTPTimeout(in time.Duration) Option {\n\treturn func(c *Client) {\n\t\tc.HTTPTimeout = in\n\t}\n}", "func WithHTTPTimeout(timeout time.Duration) ClientOps {\r\n\treturn func(c *clientOptions) {\r\n\t\tc.httpTimeout = timeout\r\n\t}\r\n}", "func HTTPTimeout(duration time.Duration) HTTPOption {\n\tretu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithHTTPMethod returns an HTTPCheckerOption that specifies the method for HTTP requests. The default method is "GET" which works in most of the cases, but another popular choice is "HEAD".
func WithHTTPMethod(method string) HTTPCheckerOption { return func(c *HTTPChecker) { c.method = method } }
[ "func WithMethod(method string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http method option can not set nil protocol\")\n\t\t}\n\t\tmethod = strings.TrimSpace(method)\n\t\tif method != \"\" {\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewHTTPChecker creates a new HTTPChecker with a URL and optional configuration. Example: checker := healthz.NewHTTPChecker(" healthz.WithHTTPTimeout(3time.Second))
func NewHTTPChecker(url string, opts ...HTTPCheckerOption) *HTTPChecker { checker := &HTTPChecker{ url: url, method: http.MethodGet, } for _, opt := range opts { opt(checker) } return checker }
[ "func NewHTTPCheck(name, endpoint string) (Check, error) {\n\tep, err := url.Parse(endpoint)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thc := &httpCheck{\n\t\tcheck: newCheck(name, ep.Hostname(), CheckTypeHTTP),\n\t\tURL: ep.Path,\n\t}\n\n\tif ep.Scheme == \"https\" {\n\t\thc.Encryption = true\n\t}\n\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check implements the Checker interface and checks the HTTP endpoint status.
func (c *HTTPChecker) Check() error { client := &http.Client{ Timeout: c.timeout, } req, err := http.NewRequest(c.method, c.url, nil) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } if resp.StatusCode != http.StatusOK { return ErrCheckFailed } return nil }
[ "func Check(url string, client HTTPGetInterface) (Status, error) {\n\tres, err := client.Get(url)\n\tif err != nil {\n\t\treturn Unknown, err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest {\n\t\treturn Healthy, nil\n\t}\n\tglog.V(1).Infof(\"Health che...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a string, this tests variants of buffer conversion: string with trailing 0's, string exactly filling the slice passed to CFieldString (simulating an exactlyfull field), and first character (exactly filling the field).
func teststring(t *testing.T, s string) { buf := toint8(s) r := kstat.CFieldString(buf[:]) if r != s { t.Fatalf("full buf mismatch: %q vs %q", s, r) } r = kstat.CFieldString(buf[:len(s)]) if r != s { t.Fatalf("exact buf mismatch: %q vs %q", s, r) } r = kstat.CFieldString(buf[:len(s)+1]) if r != s { t.Fat...
[ "func cString(b []byte) string {\n\tn := 0\n\tfor n < len(b) && b[n] != 0 {\n\t\tn++\n\t}\n\treturn string(b[0:n])\n}", "func cstring(b []byte) string {\n\tvar i int\n\tfor i = 0; i < len(b) && b[i] != 0; i++ {\n\t}\n\treturn string(b[:i])\n}", "func ReadString(b []byte) (string, error) {\n\tfor i, c := range b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings. Learn more Design a FlexMatch matchmaker (
func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) { if params == nil { params = &UpdateMatchmakingConfigurationInput{} } result, metadata, err := c.invokeOperation(ctx, "UpdateM...
[ "func (runner *McRunner) applySettings() {\n\tpropPath := filepath.Join(McServerPath(), \"server.properties\")\n\tprops, err := ioutil.ReadFile(propPath)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tnameExp, _ := regexp.Compile(\"displayname=.*\\\\n\")\n\tmotdExp, _ := regexp.Compile(\"motd=.*\\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StartEventSource starts an event source
func (ese *GitlabEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error { defer gateways.Recover(eventSource.Name) log := ese.Log.WithEventSource(eventSource.Name) log.Info("operating on event source") config, err := parseEventSource(ev...
[ "func (ese *SlackEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tdefer gateways.Recover(eventSource.Name)\n\n\tlog := ese.Log.WithEventSource(eventSource.Name)\n\tlog.Info(\"operating on event source\")\n\n\tconfig, err := par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchOrganization provides a mock function with given fields: filters
func (_m *OrganizationFetcher) FetchOrganization(filters []services.QueryFilter) (models.Organization, error) { ret := _m.Called(filters) var r0 models.Organization if rf, ok := ret.Get(0).(func([]services.QueryFilter) models.Organization); ok { r0 = rf(filters) } else { r0 = ret.Get(0).(models.Organization) ...
[ "func (m *MockOrganizationServiceClient) FetchOrganizationList(arg0 context.Context, arg1 *organization.Empty, arg2 ...grpc.CallOption) (*organization.OrganizationListResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pingLoop periodically sends a ping to all remote clusters.
func (rcs *Service) pingLoop(done <-chan struct{}) { pingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2) // create a thread pool to send pings concurrently to remotes. for i := 0; i < MaxConcurrentSends; i++ { go rcs.pingEmitter(pingChan, done) } go rcs.pingGenerator(pingChan, done) }
[ "func pingLoop(results chan Host, hostRegistry *HostRegistry, interval time.Duration, timeout time.Duration) {\n\tfor {\n\t\thostAddresses := hostRegistry.GetHostAddresses()\n\n\t\tlog.Info(\"Pinging these addresses: %q\\n\", hostAddresses)\n\n\t\tfor _, address := range hostAddresses {\n\t\t\tlog.Debug(\"Pinging: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pingEmitter pulls Remotes from the ping queue (pingChan) and pings them. Pinging a remote cannot take longer than PingTimeoutMillis.
func (rcs *Service) pingEmitter(pingChan <-chan *model.RemoteCluster, done <-chan struct{}) { for { select { case rc := <-pingChan: if rc == nil { return } online := rc.IsOnline() if err := rcs.pingRemote(rc); err != nil { rcs.server.Log().Log(mlog.LvlRemoteClusterServiceWarn, "Remote cluster...
[ "func (rcs *Service) pingLoop(done <-chan struct{}) {\n\tpingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2)\n\n\t// create a thread pool to send pings concurrently to remotes.\n\tfor i := 0; i < MaxConcurrentSends; i++ {\n\t\tgo rcs.pingEmitter(pingChan, done)\n\t}\n\n\tgo rcs.pingGenerator(pingChan,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pingRemote make a synchronous ping to a remote cluster. Return is error if ping is unsuccessful and nil on success.
func (rcs *Service) pingRemote(rc *model.RemoteCluster) error { frame, err := makePingFrame(rc) if err != nil { return err } url := fmt.Sprintf("%s/%s", rc.SiteURL, PingURL) resp, err := rcs.sendFrameToRemote(PingTimeout, rc, frame, url) if err != nil { return err } ping := model.RemoteClusterPing{} err ...
[ "func (rn *RemoteNode) Ping() error {\n\tmsg, err := rn.LocalNode.NewPingMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = rn.SendMessageSync(msg, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (rn *RemoteNode) Ping() error {\n\tmsg, err := NewPingMessage()\n\tif err != nil {\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsUserInProtectBranchWhitelist returns true if given user is in the whitelist of a branch in a repository.
func IsUserInProtectBranchWhitelist(repoID, userID int64, branch string) bool { has, err := x.Where("repo_id = ?", repoID).And("user_id = ?", userID).And("name = ?", branch).Get(new(ProtectBranchWhitelist)) return has && err == nil }
[ "func (c *cachingStore) IsBackendUserAllowed(ctx context.Context, backendUser, backendID string) (bool, error) {\n\treturn c.BackingStore.IsBackendUserAllowed(ctx, backendUser, backendID)\n}", "func (cfg *Config) inWhitelist(addr net.Addr) bool {\n\tif len(cfg.Whitelists) == 0 {\n\t\treturn false\n\t}\n\n\thost, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetProtectBranchOfRepoByName returns ProtectBranch by branch name in given repository.
func GetProtectBranchOfRepoByName(repoID int64, name string) (*ProtectBranch, error) { protectBranch := &ProtectBranch{ RepoID: repoID, Name: name, } has, err := x.Get(protectBranch) if err != nil { return nil, err } else if !has { return nil, ErrBranchNotExist{args: map[string]any{"name": name}} } ret...
[ "func GetBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection\n\t// ---\n\t// summary: Get a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: ow...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsBranchOfRepoRequirePullRequest returns true if branch requires pull request in given repository.
func IsBranchOfRepoRequirePullRequest(repoID int64, name string) bool { protectBranch, err := GetProtectBranchOfRepoByName(repoID, name) if err != nil { return false } return protectBranch.Protected && protectBranch.RequirePullRequest }
[ "func (p *PullRequest) IsForkPullRequest() bool {\n\treturn p.Head.RepoPath() != p.Base.RepoPath()\n}", "func ifPullRequest(issue *github.Issue) bool {\n\tif issue.PullRequestLinks != nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *PullRequestComment) HasPullrequest() bool {\n\tif o != nil && o.Pullr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UpdateProtectBranch saves branch protection options. If ID is 0, it creates a new record. Otherwise, updates existing record.
func UpdateProtectBranch(protectBranch *ProtectBranch) (err error) { sess := x.NewSession() defer sess.Close() if err = sess.Begin(); err != nil { return err } if protectBranch.ID == 0 { if _, err = sess.Insert(protectBranch); err != nil { return fmt.Errorf("Insert: %v", err) } } if _, err = sess.ID(p...
[ "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/js...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UpdateOrgProtectBranch saves branch protection options of organizational repository. If ID is 0, it creates a new record. Otherwise, updates existing record. This function also performs check if whitelist user and team's IDs have been changed to avoid unnecessary whitelist delete and regenerate.
func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) { if err = repo.GetOwner(); err != nil { return fmt.Errorf("GetOwner: %v", err) } else if !repo.Owner.IsOrganization() { return fmt.Errorf("expect repository owner to be an organizati...
[ "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/js...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetProtectBranchesByRepoID returns a list of ProtectBranch in given repository.
func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) { protectBranches := make([]*ProtectBranch, 0, 2) return protectBranches, x.Where("repo_id = ? and protected = ?", repoID, true).Asc("name").Find(&protectBranches) }
[ "func (c *client) GetBranches(org, repo string, onlyProtected bool) ([]Branch, error) {\n\tdurationLogger := c.log(\"GetBranches\", org, repo, onlyProtected)\n\tdefer durationLogger()\n\n\tvar branches []Branch\n\terr := c.readPaginatedResultsWithValues(\n\t\tfmt.Sprintf(\"/repos/%s/%s/branches\", org, repo),\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
QuickExec quick exec an simple command line
func QuickExec(cmdLine string, workDir ...string) (string, error) { return ExecLine(cmdLine, workDir...) }
[ "func Exec(cmd string) {\n\n\tfmt.Printf(\"Você digitou: %s \", cmd)\n\n}", "func Setup(c *exec.Cmd) {}", "func ExecBuiltin(args []string) {\n\tif len(args) <= 0 {\n\t\tPanic(\"No parameters\")\n\t}\n\n\t//TODO: Loadings\n\tswitch args[0] {\n\tcase \"Error\":\n\t\tError(strings.Join(args[1:], \" \"))\n\tcase \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ExecLine quick exec an command line string
func ExecLine(cmdLine string, workDir ...string) (string, error) { p := cmdline.NewParser(cmdLine) // create a new Cmd instance cmd := p.NewExecCmd() if len(workDir) > 0 { cmd.Dir = workDir[0] } bs, err := cmd.Output() return string(bs), err }
[ "func QuickExec(cmdLine string, workDir ...string) (string, error) {\n\treturn ExecLine(cmdLine, workDir...)\n}", "func exec(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\tr := R\n\tvar in io.Reader\n\tif L != nil {\n\t\tr = L\n\t\tc, ok := R.(apl.Channel)\n\t\tif ok {\n\t\t\tin = bufio.NewReader(apl.NewChan...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ShellExec exec command by shell cmdLine. eg: "ls al"
func ShellExec(cmdLine string, shells ...string) (string, error) { // shell := "/bin/sh" shell := "sh" if len(shells) > 0 { shell = shells[0] } var out bytes.Buffer cmd := exec.Command(shell, "-c", cmdLine) cmd.Stdout = &out if err := cmd.Run(); err != nil { return "", err } return out.String(), nil }
[ "func exec(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\tr := R\n\tvar in io.Reader\n\tif L != nil {\n\t\tr = L\n\t\tc, ok := R.(apl.Channel)\n\t\tif ok {\n\t\t\tin = bufio.NewReader(apl.NewChannelReader(a, c))\n\t\t} else {\n\t\t\tin = strings.NewReader(R.String(a))\n\t\t}\n\t}\n\n\tv, ok := domain.ToStringA...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ThreeWay attempts a threeway merge between two candidates and a common ancestor. It considers the three of them recursively, applying some simple rules to identify conflicts: If any of the three nodes are different NomsKinds: conflict If we are dealing with a map: If the same key is both removed and inserted wrt parent...
func ThreeWay(a, b, parent types.Value, vwr types.ValueReadWriter) (merged types.Value, err error) { if a == nil && b == nil { return parent, nil } else if a == nil { return parent, newMergeConflict("Cannot merge nil Value with %s.", b.Type().Describe()) } else if b == nil { return parent, newMergeConflict("Ca...
[ "func ForeignKeysMerge(ctx context.Context, mergedRoot, ourRoot, theirRoot, ancRoot *doltdb.RootValue) (*doltdb.ForeignKeyCollection, []FKConflict, error) {\n\tours, err := ourRoot.GetForeignKeyCollection(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttheirs, err := theirRoot.GetForeignKeyCollection(ct...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a and b cannot be merged if they are of different NomsKind, or if at least one of the two is nil, or if either is a Noms primitive.
func unmergeable(a, b types.Value) bool { if a != nil && b != nil { aKind, bKind := a.Type().Kind(), b.Type().Kind() return aKind != bKind || types.IsPrimitiveKind(aKind) || types.IsPrimitiveKind(bKind) } return true }
[ "func merge(old, a, b interface{}) (merged interface{}, conflicts bool) {\n\tif reflect.DeepEqual(a, b) {\n\t\treturn a, false\n\t}\n\tvold, va, vb := reflect.ValueOf(old), reflect.ValueOf(a), reflect.ValueOf(b)\n\ttold, ta, tb := reflect.TypeOf(old), reflect.TypeOf(a), reflect.TypeOf(b)\n\tif !isSameType(ta, tb) {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewRabbitMQServer returns the new rabbitmq server with the connection and channel
func NewRabbitMQServer(username string, password string, host string) *Server { return &Server{ RabbitMQUsername: username, RabbitMQPassword: password, RabbitMQHost: host, } }
[ "func NewServer(config ConnectionConfig, logChan chan Log) (*Server, error) {\n\tif config.ReconInterval == 0 {\n\t\treturn nil, fmt.Errorf(\"reconnection interval must be above 0\")\n\t}\n\n\tsrv := &Server{\n\t\tmutex: &sync.RWMutex{},\n\t\tconfig: config,\n\t\tlogChan: logChan,\n\t\tchannels: make(map[stri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defang Takes an IOC and defangs it using the standard defangReplacements
func (ioc *IOC) Defang() *IOC { copy := *ioc ioc = &copy // Just do a string replace on each if replacements, ok := defangReplacements[ioc.Type]; ok { for _, fangPair := range replacements { ioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.fanged, fangPair.defanged) } } return ioc }
[ "func (ioc *IOC) Fang() *IOC {\n\tcopy := *ioc\n\tioc = &copy\n\n\t// String replace all defangs in our standard set\n\tif replacements, ok := defangReplacements[ioc.Type]; ok {\n\t\tfor _, fangPair := range replacements {\n\t\t\tioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.defanged, fangPair.fanged)\n\t\t}\n\t}\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fang Takes an IOC and removes the defanging stuff from it (converts to a fanged IOC).
func (ioc *IOC) Fang() *IOC { copy := *ioc ioc = &copy // String replace all defangs in our standard set if replacements, ok := defangReplacements[ioc.Type]; ok { for _, fangPair := range replacements { ioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.defanged, fangPair.fanged) } } // Regex replace everyth...
[ "func (ioc *IOC) Defang() *IOC {\n\tcopy := *ioc\n\tioc = &copy\n\n\t// Just do a string replace on each\n\tif replacements, ok := defangReplacements[ioc.Type]; ok {\n\t\tfor _, fangPair := range replacements {\n\t\t\tioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.fanged, fangPair.defanged)\n\t\t}\n\t}\n\n\treturn ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsFanged Takes an IOC and returns if it is fanged. Non fanging types (bitcoin, hashes, file, cve) are always determined to not be fanged
func (ioc *IOC) IsFanged() bool { if ioc.Type == Bitcoin || ioc.Type == MD5 || ioc.Type == SHA1 || ioc.Type == SHA256 || ioc.Type == SHA512 || ioc.Type == File || ioc.Type == CVE { return false } // Basically just check if the fanged version is different from the input // This does label a partially ...
[ "func (b Bet) IsForced() bool {\n\tswitch b.Type {\n\tcase bet.Ante, bet.BringIn, bet.SmallBlind, bet.BigBlind, bet.GuestBlind, bet.Straddle:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (me TSpeakingTypeType) IsDebate() bool { return me.String() == \"debate\" }", "func (me TxsdWorkType) IsFc(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get finds projects by tags or all projects or the project in the current directory
func (i *Index) Get(tags []string, all bool) ([]string, error) { switch { case all: err := i.clean() return i.projects(), err case len(tags) > 0: if err := i.clean(); err != nil { return []string{}, err } projectsWithTags := []string{} for _, p := range i.projects() { found, err := i.hasTags(p, tag...
[ "func projects(ctx context.Context) ([]string, error) {\n\tLogf(ctx, \"finding your projects...\")\n\treturn gcloud(ctx, \"projects\", \"list\", \"--format\", \"value(projectId)\")\n}", "func (s *SearchService) Projects(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
loginAttempt increments the number of login attempts in sessions variable
func loginAttempt(sess *sessions.Session) { // Log the attempt if sess.Values[sessLoginAttempt] == nil { sess.Values[sessLoginAttempt] = 1 } else { sess.Values[sessLoginAttempt] = sess.Values[sessLoginAttempt].(int) + 1 } }
[ "func AuthenticateLoginAttempt(r *http.Request) *sessions.Session {\n\tvar userid string\n\tlog.Println(\"Authenticating Login credentials.\")\n\tattemptEmail := template.HTMLEscapeString(r.Form.Get(\"email\")) //Escape special characters for security.\n\tattemptPassword := template.HTMLEscapeString(r.Form.Ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
jdecrypt private function to "decrypt" password
func jdecrypt( stCuen string , stPass string)(stRes string,err error){ var stEnc []byte stEnc, err = base64.StdEncoding.DecodeString(stPass) if err != nil { log.Println("jdecrypt ", stPass, err ) } else{ lon := len(stEnc) lan := len(stCuen) if lon > ...
[ "func DecryptJasypt(encrypted []byte, password string) ([]byte, error) {\n\tif len(encrypted) < des.BlockSize {\n\t\treturn nil, fmt.Errorf(\"Invalid encrypted text. Text length than block size.\")\n\t}\n\n\tsalt := encrypted[:des.BlockSize]\n\tct := encrypted[des.BlockSize:]\n\n\tkey, err := PBKDF1MD5([]byte(passw...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
JLoginGET service to return persons data
func JLoginGET(w http.ResponseWriter, r *http.Request) { var params httprouter.Params sess := model.Instance(r) v := view.New(r) v.Vars["token"] = csrfbanana.Token(w, r, sess) params = context.Get(r, "params").(httprouter.Params) cuenta := params.ByName("cuenta") ...
[ "func GetData(accessToken string, w http.ResponseWriter, r *http.Request) {\n\trequest, err := http.NewRequest(\"GET\", \"https://auth.vatsim.net/api/user\", nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trequest.Header.Add(\"Bearer\", accessToken)\n\trequest.Header.Add(\"accept\", \"application/json\")\n\tc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
LoginGET displays the login page
func LoginGET(w http.ResponseWriter, r *http.Request) { sess := model.Instance(r) v := view.New(r) v.Name = "login/login" v.Vars["token"] = csrfbanana.Token(w, r, sess) // Refill any form fields view.Repopulate([]string{"cuenta","password"}, r.Form, v.Vars) v.Render(w) }
[ "func LoginGET(c *gin.Context) {\n\tc.HTML(http.StatusOK, \"login.html\", gin.H{})\n}", "func (m *Repository) GetLogin(w http.ResponseWriter, r *http.Request) {\n\tif m.App.Session.Exists(r.Context(), \"user_id\") {\n\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\trender.Template(w, r, \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new JWK for the desired type. An error will be returned if an invalid type is passed
func NewJwk(kty string) (j *Jwk, err error) { switch kty { case KeyTypeOct, KeyTypeRSA, KeyTypeEC: j = &Jwk{Type: kty} default: err = errors.New("Key Type Invalid. Must be Oct, RSA or EC") } return }
[ "func NewJWK(jwk map[string]interface{}) JWK {\n\treturn jwk\n}", "func (pk PublicKey) JWK() JWK {\n\tentry, ok := pk[JwkProperty]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tjson, ok := entry.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn NewJWK(json)\n}", "func MustNewKeyWithType(input string) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements the json.Marshaler interface and JSON encodes the Jwk
func (jwk *Jwk) MarshalJSON() (data []byte, err error) { // Remove any potentionally conflicting claims from the JWK's additional members delete(jwk.AdditionalMembers, "kty") delete(jwk.AdditionalMembers, "kid") delete(jwk.AdditionalMembers, "alg") delete(jwk.AdditionalMembers, "use") delete(jwk.AdditionalMember...
[ "func JSONEncoder() Encoder { return jsonEncoder }", "func (k *Key) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + k.Encode() + `\"`), nil\n}", "func (j *jws) Serialize(key interface{}) ([]byte, error) {\n\tif j.isJWT {\n\t\treturn j.Compact(key)\n\t}\n\treturn nil, ErrIsNotJWT\n}", "func EncodeJSON(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate checkes the JWK object to verify the parameter set represent a valid JWK. If jwk is valid a nil error will be returned. If a JWK is invalid an error will be returned describing the values that causes the validation to fail.
func (jwk *Jwk) Validate() error { // If the alg parameter is set, make sure it matches the set JWK Type if len(jwk.Algorithm) > 0 { algKeyType := GetKeyType(jwk.Algorithm) if algKeyType != jwk.Type { fmt.Errorf("Jwk Type (kty=%v) doesn't match the algorithm key type (%v)", jwk.Type, algKeyType) } } switc...
[ "func (pk PublicKey) JWK() JWK {\n\tentry, ok := pk[JwkProperty]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tjson, ok := entry.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn NewJWK(json)\n}", "func (m *RemoteJwks) Validate() error {\n\treturn m.validate(false)\n}", "func validate_Groups_ValidateW...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ValidateRSAParams checks the RSA parameters of a RSA type of JWK. If a JWK is invalid an error will be returned describing the values that causes the validation to fail.
func (jwk *Jwk) validateRSAParams() error { if jwk.E < 1 { return errors.New("RSA Required Param (E) is empty/default (<= 0)") } if jwk.N == nil { return errors.New("RSA Required Param (N) is nil") } pOk := jwk.P != nil qOk := jwk.Q != nil dpOk := jwk.Dp != nil dqOk := jwk.Dq != nil qiOk := jwk.Qi != nil ...
[ "func (priv *PKCS11PrivateKeyRSA) Validate() error {\n\tpub := priv.key.PubKey.(*rsa.PublicKey)\n\tif pub.E < 2 {\n\t\treturn errMalformedRSAKey\n\t}\n\t// The software implementation actively rejects 'large' public\n\t// exponents, in order to simplify its own implementation.\n\t// Here, instead, we expect the PKC...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewIndexDB creates a new instance of IndexDB
func NewIndexDB(db store.DB, recordStore *RecordDB) *IndexDB { return &IndexDB{db: db, recordStore: recordStore} }
[ "func New(ng engine.Engine) (*DB, error) {\n\tdb := DB{\n\t\tng: ng,\n\t}\n\n\terr := db.Update(func(tx *Tx) error {\n\t\t_, err := tx.GetTable(indexTable)\n\t\tif err == ErrTableNotFound {\n\t\t\t_, err = tx.CreateTable(indexTable)\n\t\t}\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tretu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UpdateLastKnownPulse must be called after updating TopSyncPulse
func (i *IndexDB) UpdateLastKnownPulse(ctx context.Context, topSyncPulse insolar.PulseNumber) error { i.lock.Lock() defer i.lock.Unlock() indexes, err := i.ForPulse(ctx, topSyncPulse) if err != nil && err != ErrIndexNotFound { return errors.Wrapf(err, "failed to get indexes for pulse: %d", topSyncPulse) } for...
[ "func (t *tick) updateLast() {\n\tif t.passed() {\n\t\tt.last = t.now\n\t}\n}", "func (ps *RandomPeerSelector) UpdateLast(peer uint32) {\n\tps.last = peer\n}", "func (trd *trxDispatcher) updateLastSeenBlock() {\n\t// get the current value\n\tlsb := trd.blkObserver.Load()\n\tlog.Noticef(\"last seen block is #%d\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TruncateHead remove all records after lastPulse
func (i *IndexDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error { i.lock.Lock() defer i.lock.Unlock() it := i.db.NewIterator(&indexKey{objID: *insolar.NewID(pulse.MinTimePulse, nil), pn: from}, false) defer it.Close() var hasKeys bool for it.Next() { hasKeys = true key := newIndexKey(it....
[ "func (r *RecordDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error {\n\n\tif err := r.truncateRecordsHead(ctx, from); err != nil {\n\t\treturn errors.Wrap(err, \"failed to truncate records head\")\n\t}\n\n\tif err := r.truncatePositionRecordHead(ctx, recordPositionKey{pn: from}, recordPositionKey...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ForID returns a lifeline from a bucket with provided PN and ObjID
func (i *IndexDB) ForID(ctx context.Context, pn insolar.PulseNumber, objID insolar.ID) (record.Index, error) { var buck *record.Index buck, err := i.getBucket(pn, objID) if err == ErrIndexNotFound { lastPN, err := i.getLastKnownPN(objID) if err != nil { return record.Index{}, ErrIndexNotFound } buck, err...
[ "func (s *Storage) GetLifeline(objRef []byte, fromIndex *string, pulseNumberLt, pulseNumberGt, timestampLte, timestampGte *int64, limit, offset int, sortByIndexAsc bool) ([]models.Record, int, error) {\n\ttimer := prometheus.NewTimer(GetLifelineDuration)\n\tdefer timer.ObserveDuration()\n\n\tquery := s.db.Model(&mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WaitReady waits for machinecontroller and its webhook to become ready
func WaitReady(s *state.State) error { if !s.Cluster.MachineController.Deploy { return nil } s.Logger.Infoln("Waiting for machine-controller to come up...") if err := cleanupStaleResources(s.Context, s.DynamicClient); err != nil { return err } if err := waitForWebhook(s.Context, s.DynamicClient); err != ni...
[ "func WaitReady(ctx *util.Context) error {\n\tif !ctx.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\tctx.Logger.Infoln(\"Waiting for machine-controller to come up…\")\n\n\t// Wait a bit to let scheduler to react\n\ttime.Sleep(10 * time.Second)\n\n\tif err := WaitForWebhook(ctx.DynamicClient); err != n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
waitForCRDs waits for machinecontroller CRDs to be created and become established
func waitForCRDs(s *state.State) error { condFn := clientutil.CRDsReadyCondition(s.Context, s.DynamicClient, CRDNames()) err := wait.PollUntilContextTimeout(s.Context, 5*time.Second, 3*time.Minute, false, condFn.WithContext()) return fail.KubeClient(err, "waiting for machine-controller CRDs to became ready") }
[ "func (m *MeshReconciler) waitForCRD(name string, client runtimeclient.Client) error {\n\tm.logger.WithField(\"name\", name).Debug(\"waiting for CRD\")\n\n\tbackoffConfig := backoff.ConstantBackoffConfig{\n\t\tDelay: time.Duration(backoffDelaySeconds) * time.Second,\n\t\tMaxRetries: backoffMaxretries,\n\t}\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DestroyWorkers destroys all MachineDeployment, MachineSet and Machine objects
func DestroyWorkers(s *state.State) error { if !s.Cluster.MachineController.Deploy { s.Logger.Info("Skipping deleting workers because machine-controller is disabled in configuration.") return nil } if s.DynamicClient == nil { return fail.NoKubeClient() } ctx := context.Background() // Annotate nodes with...
[ "func (p *Pool) Destroy() {\n\tp.tasks.Wait()\n\n\tvar wg sync.WaitGroup\n\tfor _, w := range p.Workers() {\n\t\twg.Add(1)\n\t\tgo func(w *Worker) {\n\t\t\tdefer wg.Done()\n\n\t\t\tp.destroyWorker(w)\n\t\t}(w)\n\t}\n\n\twg.Wait()\n}", "func (bc *BaseCluster) Destroy() {\n\tfor _, m := range bc.Machines() {\n\t\tb...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WaitDestroy waits for all Machines to be deleted
func WaitDestroy(s *state.State) error { s.Logger.Info("Waiting for all machines to get deleted...") return wait.PollUntilContextTimeout(s.Context, 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) { list := &clusterv1alpha1.MachineList{} if err := s.DynamicClient.List(ctx, list, dyncl...
[ "func (bc *BaseCluster) Destroy() {\n\tfor _, m := range bc.Machines() {\n\t\tbc.numMachines--\n\t\tm.Destroy()\n\t}\n}", "func WaitForCleanup() error {\n\tctx := context.Background()\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar (\n\t\tinterval = time.NewTicker(1 * time.Se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
waitForMachineController waits for machinecontroller to become running
func waitForMachineController(ctx context.Context, client dynclient.Client) error { condFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{ Namespace: resources.MachineControllerNameSpace, LabelSelector: labels.SelectorFromSet(map[string]string{ appLabelKey: resources.MachineControllerName, ...
[ "func waitForMachineState(api *cloudapi.Client, id, state string, timeout time.Duration) error {\n\treturn waitFor(\n\t\tfunc() (bool, error) {\n\t\t\tcurrentState, err := readMachineState(api, id)\n\t\t\treturn currentState == state, err\n\t\t},\n\t\tmachineStateChangeCheckInterval,\n\t\tmachineStateChangeTimeout,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
waitForWebhook waits for machinecontrollerwebhook to become running
func waitForWebhook(ctx context.Context, client dynclient.Client) error { condFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{ Namespace: resources.MachineControllerNameSpace, LabelSelector: labels.SelectorFromSet(map[string]string{ appLabelKey: resources.MachineControllerWebhookName, }...
[ "func waitForWebhook(ctx context.Context, log *zap.SugaredLogger, client ctrlruntimeclient.Client, kubermaticNamespace string) error {\n\t// wait for the webhook to be ready\n\ttimeout := 30 * time.Second\n\tendpoint := types.NamespacedName{Namespace: kubermaticNamespace, Name: \"seed-webhook\"}\n\n\tlog.Infow(\"wa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultKeymap returns a copy of the default Keymap Useful if inspection/customization is needed.
func DefaultKeymap() Keymap { return Keymap{ ansi.NEWLINE: (*Core).Enter, ansi.CARRIAGE_RETURN: (*Core).Enter, ansi.CTRL_C: (*Core).Interrupt, ansi.CTRL_D: (*Core).DeleteOrEOF, ansi.CTRL_H: (*Core).Backspace, ansi.BACKSPACE: (*Core).Backspace, ansi.CTRL_L: ...
[ "func NewDefaultKeyMap() *KeyMap {\n\treturn &KeyMap{\n\t\tYes: []string{\"y\", \"Y\"},\n\t\tNo: []string{\"n\", \"N\"},\n\t\tSelectYes: []string{\"left\"},\n\t\tSelectNo: []string{\"right\"},\n\t\tToggle: []string{\"tab\"},\n\t\tSubmit: []string{\"enter\"},\n\t\tAbort: []string{\"ctrl+c\"},...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateFileSystem invokes the dfs.CreateFileSystem API synchronously
func (client *Client) CreateFileSystem(request *CreateFileSystemRequest) (response *CreateFileSystemResponse, err error) { response = CreateCreateFileSystemResponse() err = client.DoAction(request, response) return }
[ "func (client StorageGatewayClient) CreateFileSystem(ctx context.Context, request CreateFileSystemRequest) (response CreateFileSystemResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif req...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateFileSystemWithChan invokes the dfs.CreateFileSystem API asynchronously
func (client *Client) CreateFileSystemWithChan(request *CreateFileSystemRequest) (<-chan *CreateFileSystemResponse, <-chan error) { responseChan := make(chan *CreateFileSystemResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response,...
[ "func (client *Client) CreateFileSystemWithCallback(request *CreateFileSystemRequest, callback func(response *CreateFileSystemResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateFileSystemResponse\n\t\tvar err error\n\t\tdefer close(resul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateFileSystemWithCallback invokes the dfs.CreateFileSystem API asynchronously
func (client *Client) CreateFileSystemWithCallback(request *CreateFileSystemRequest, callback func(response *CreateFileSystemResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *CreateFileSystemResponse var err error defer close(result) response, err...
[ "func (client *Client) CreateFileSystemWithChan(request *CreateFileSystemRequest) (<-chan *CreateFileSystemResponse, <-chan error) {\n\tresponseChan := make(chan *CreateFileSystemResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(err...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateCreateFileSystemRequest creates a request to invoke CreateFileSystem API
func CreateCreateFileSystemRequest() (request *CreateFileSystemRequest) { request = &CreateFileSystemRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("DFS", "2018-06-20", "CreateFileSystem", "alidfs", "openAPI") request.Method = requests.POST return }
[ "func (client *Client) CreateFileSystem(request *CreateFileSystemRequest) (response *CreateFileSystemResponse, err error) {\n\tresponse = CreateCreateFileSystemResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func CreateListFileSystemsRequest() (request *ListFileSystemsRequest) {\n\trequest ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateCreateFileSystemResponse creates a response to parse from CreateFileSystem response
func CreateCreateFileSystemResponse() (response *CreateFileSystemResponse) { response = &CreateFileSystemResponse{ BaseResponse: &responses.BaseResponse{}, } return }
[ "func CreateListFileSystemsResponse() (response *ListFileSystemsResponse) {\n\tresponse = &ListFileSystemsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SaveBody creates or overwrites the existing response body file for the url associated with the given Fetcher
func (f Fetcher) SaveBody() { file, err := os.Create(f.url) check(err) defer file.Close() b := f.processBody() _, err = file.Write(b) check(err) }
[ "func (r *Response) Save(fileName string) error {\r\n\treturn ioutil.WriteFile(fileName, r.Body, 0644)\r\n}", "func saveFile(savedPath string, res *http.Response) {\n\t// create a file of the given name and in the given path\n\tf, err := os.Create(savedPath)\n\terrCheck(err)\n\tio.Copy(f, res.Body)\n}", "func (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
processBody reads the response body associated with the given Fetcher and reports any errors
func (f Fetcher) processBody() []byte { b, err := io.ReadAll(f.resp.Body) f.resp.Body.Close() if f.resp.StatusCode >= 300 { log.Fatalf("Response failed with status code: %d\n and body: %s\n", f.resp.StatusCode, b) } check(err) return b }
[ "func postprocessJSONResponse(resp *http.Response, errCh chan error, proc func(body []byte) error) {\n\tbody, err := readBodyWithTimeout(resp.Body, responseBodyReadTimeout)\n\tif err != nil {\n\t\terrCh <- err\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\terrCh <- parseError(body)\n\t\treturn\n\t}\n\t// proc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewTreeFromState initializes a Tree from state data previously written by WriteState
func NewTreeFromState(data io.Reader) (*Tree, error) { idx := &Tree{ newBlocks: make(chan int), done: make(chan bool), blockMap: make(map[int]int), } if err := idx.loadState(data); err != nil { return nil, fmt.Errorf("Failed loading index state : %v", err) } go idx.blockAllocator() return idx, nil }
[ "func (db *DB) InitStateTree(depth int) error {\n\trootNode := core.NewStateRoot(depth)\n\treturn db.Instance.Create(&rootNode).Error\n}", "func newTree() *tree {\n\treturn &tree{Index: 0}\n}", "func InitAllTree(currentState state.State, quit chan struct{}) StateNode {\n\tnextState := state.CopyState(currentSta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Len returns the current number of items in the tree It needs to query all allocators for their counters, so it will block if an allocator is constantly reserved...
func (idx *Tree) Len() (count int) { idx.Stop() count = int(idx.liveObjects) for _, a := range idx.allocators { count += int(a.itemCounter) } idx.Start() return }
[ "func (t *Tree) Len() int { return t.Count }", "func (t *BinaryTree) Size() int { return t.count }", "func (t *Tree) Len() int {\n\treturn t.Count\n}", "func (p NodePools) Len() int { return len(p) }", "func (t *Tree) Len() int {\n\treturn (int(t.leaves-1)*BlockSize + int(t.lastBlockLen))\n}", "func (r *R...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop withdraws all allocators to prevent any more writes or reads. It will block until it gets all allocators. If already stopped, it returns silently.
func (idx *Tree) Stop() { if !atomic.CompareAndSwapInt32(&idx.stopped, 0, 1) { return } for i := 0; i < len(idx.allocators); i++ { _ = idx.allocatorQueue.get() } }
[ "func (fetchers Fetchers) Stop() {\n\tfor _, fetcher := range fetchers {\n\t\tfetcher.Stop()\n\t}\n}", "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func exitAllocRunner(runners ...AllocRunner) {\n\tfor _, ar := range runners {\n\t\tterminalAlloc := a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start releases all allocators withdrawn through a previous call to Stop. In case the index is not stopped it returns silently.
func (idx *Tree) Start() { if !atomic.CompareAndSwapInt32(&idx.stopped, 1, 0) { return } for i := 0; i < len(idx.allocators); i++ { idx.allocatorQueue.put(i) } }
[ "func (idx *Tree) Stop() {\n\tif !atomic.CompareAndSwapInt32(&idx.stopped, 0, 1) {\n\t\treturn\n\t}\n\tfor i := 0; i < len(idx.allocators); i++ {\n\t\t_ = idx.allocatorQueue.get()\n\t}\n}", "func (d *dummyContractStakingIndexer) Start(ctx context.Context) error {\n\treturn nil\n}", "func (mi *MinerIndex) start(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WriteState writes the state of a stopped index to the given writer. If the index is not stopped the result is undefined.
// WriteState writes the state of a stopped index to the given writer,
// returning the number of bytes written and any error from the underlying
// writeState implementation. If the index is not stopped the result is
// undefined.
func (idx *Tree) WriteState(out io.Writer) (n int, err error) {
	n, err = idx.writeState(out)
	return
}
[ "func (c *Client) WriteState(m gopongmsg.State) error {\n\tm.LastInputIndex = c.GetLastInputIndex()\n\tmsg := gopongmsg.Server{\n\t\tMsg: &gopongmsg.Server_State{\n\t\t\tState: &m,\n\t\t},\n\t}\n\n\treturn c.Write(msg)\n}", "func (d *EtcdStateDriver) WriteState(key string, value core.State,\n\tmarshal func(interf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
allocateNode returns the new node and its data block, position at start
func (idx *Tree) allocateNode(a *Allocator, count int, prefixLen int) (n uint64, data []uint64) { prefixSlots := (prefixLen + 7) >> 3 if prefixLen >= 255 { prefixSlots++ } count += prefixSlots n = a.newNode(count) block := int(n >> blockSlotsShift) offset := int(n & blockSlotsOffsetMask) data = idx.blocks[blo...
[ "func (t *BTree) AllocateNode() *BTreeNode {\n\tx := BTreeNode{}\n\tfor i := 0; i < 2*t.t; i++ {\n\t\tx.children = append(x.children, t.nullNode)\n\t}\n\tfor i := 0; i < 2*t.t-1; i++ {\n\t\tx.keys = append(x.keys, -1)\n\t}\n\treturn &x\n}", "func (idx *Tree) allocateNodeWithPrefix(a *Allocator, count int, prefix ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
allocateNodeWithPrefix returns the new node and its data block, positioned after the prefix
func (idx *Tree) allocateNodeWithPrefix(a *Allocator, count int, prefix []byte) (n uint64, data []uint64) { prefixLen := len(prefix) prefixSlots := (prefixLen + 7) >> 3 if prefixLen >= 255 { prefixSlots++ } count += prefixSlots n = a.newNode(count) block := int(n >> blockSlotsShift) offset := int(n & blockSlo...
[ "func (idx *Tree) allocateNode(a *Allocator, count int, prefixLen int) (n uint64, data []uint64) {\n\tprefixSlots := (prefixLen + 7) >> 3\n\tif prefixLen >= 255 {\n\t\tprefixSlots++\n\t}\n\tcount += prefixSlots\n\tn = a.newNode(count)\n\tblock := int(n >> blockSlotsShift)\n\toffset := int(n & blockSlotsOffsetMask)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetAllocator reserves an allocator used for bulk Lookup/Update/Delete operations.
// GetAllocator reserves an allocator for bulk Lookup/Update/Delete
// operations. It blocks until one is available; release it with
// ReleaseAllocator when done.
func (idx *Tree) GetAllocator() *Allocator {
	id := idx.allocatorQueue.get()
	return idx.allocators[id]
}
[ "func GetRuntimePortAllocator() (*RuntimePortAllocator, error) {\n\tif rpa.pa == nil {\n\t\tif err := rpa.createAndRestorePortAllocator(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn rpa, nil\n}", "func NewAllocator(provider lCoreProvider) *Allocator {\n\treturn &Allocator{\n\t\tConfig: make(Alloc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ReleaseAllocator returns an allocator previously reserved using GetAllocator
// ReleaseAllocator returns an allocator previously reserved via
// GetAllocator, making it available to other operations again.
func (idx *Tree) ReleaseAllocator(alloc *Allocator) {
	idx.allocatorQueue.put(alloc.id)
}
[ "func (allocator *Allocator) ReleaseAllocator() {\n\tC.zj_AllocatorRelease(allocator.A)\n}", "func (a *ResourceAllocator) Free(b []byte) {\n\tif a == nil {\n\t\tDefaultAllocator.Free(b)\n\t\treturn\n\t}\n\n\tsize := len(b)\n\n\t// Release the memory to the allocator first.\n\talloc := a.allocator()\n\talloc.Free(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PrepareUpdate reserves an allocator and uses it to prepare an update operation. See Allocator.PrepareUpdate for details
func (idx *Tree) PrepareUpdate(key []byte) (found bool, op *UpdateOperation) { id := idx.allocatorQueue.get() op = newUpdateOperation(idx, idx.allocators[id], true) return op.prepareUpdate(key), op }
[ "func (t *Table) PrepareUpdate(tu fibdef.Update) (*UpdateCommand, error) {\n\tu := &UpdateCommand{}\n\tu.real.RealUpdate = tu.Real()\n\tu.virt.VirtUpdate = tu.Virt()\n\n\tu.allocSplit = u.real.prepare(t)\n\tu.allocated = make([]*Entry, u.allocSplit+u.virt.prepare(t))\n\tif e := t.allocBulk(u.allocated); e != nil {\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PrepareDelete reserves an allocator and uses it to prepare a delete operation. See Allocator.PrepareDelete for details
func (idx *Tree) PrepareDelete(key []byte) (found bool, op *DeleteOperation) { id := idx.allocatorQueue.get() op = newDeleteOperation(idx, idx.allocators[id], true) if op.prepare(key) { return true, op } op.Abort() return false, nil }
[ "func (client DevicesClient) DeletePreparer(ctx context.Context, deviceName string, resourceGroupName string, managerName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"deviceName\": deviceName,\n\t\t\"managerName\": managerName,\n\t\t\"resourceGroupName\": resourc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MaxKey returns the maximum key having the given searchPrefix, or the maximum key in the whole index if searchIndex is nil. Maximum means the last key in the lexicographic order. If keys are uint64 in BigEndian it is also the largest number. If ok is false the index is empty. For example, if we store temperature reading...
func (idx *Tree) MaxKey(searchPrefix []byte) (v uint64, ok bool) { raw, _ := idx.partialSearch(searchPrefix) if raw == 0 { return 0, false } if isLeaf(raw) { return getLeafValue(raw), true } // now find the max searchLoop: for { _, node, count, prefixLen := explodeNode(raw) block := int(node >> blockSlot...
[ "func GetMaxIndexKey(shardID uint64, key []byte) []byte {\n\tkey = getKeySlice(key, idKeyLength)\n\treturn getIDKey(maxIndexSuffix, shardID, key)\n}", "func MaxKey() Val { return Val{t: bsontype.MaxKey} }", "func (b *storeBuilder) MaxKey() uint32 {\n\treturn b.maxKey\n}", "func MaxKey() uint64 {\n\treturn con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MinKey returns the minimum key having the given searchPrefix, or the minimum key in the whole index if searchIndex is nil. Minimum means the first key in the lexicographic order. If keys are uint64 in BigEndian it is also the smallest number. If ok is false the index is empty.
func (idx *Tree) MinKey(searchPrefix []byte) (v uint64, ok bool) { raw, _ := idx.partialSearch(searchPrefix) if raw == 0 { return 0, false } if isLeaf(raw) { return getLeafValue(raw), true } // now find the min searchLoop: for { _, node, count, prefixLen := explodeNode(raw) block := int(node >> blockSlot...
[ "func (mi *MetricIndex) FindLowestPrefix(path string) (iter iterator.Iterator, reg *regexp.Regexp, prefix string, err error) {\n\n\tsegs := strings.Split(path, \".\")\n\tpLen := len(segs)\n\n\t// find the longest chunk w/o a reg and that will be the level db prefix filter\n\tneedsRegex := needRegex(path)\n\n\tlongC...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }