Merge branch 'v1.14' into resiliency-error-code-retries-docs

Mark Fussell, 2024-10-09 14:32:36 +01:00, committed by GitHub
commit dc120a05f1
6 changed files with 38 additions and 26 deletions


@@ -104,7 +104,7 @@ The Dapr actor runtime provides a simple turn-based access model for accessing a
 ### State
-Transactional state stores can be used to store actor state. To specify which state store to use for actors, specify value of property `actorStateStore` as `true` in the state store component's metadata section. Actors state is stored with a specific scheme in transactional state stores, allowing for consistent querying. Only a single state store component can be used as the state store for all actors. Read the [state API reference]({{< ref state_api.md >}}) and the [actors API reference]({{< ref actors_api.md >}}) to learn more about state stores for actors.
+Transactional state stores can be used to store actor state. Regardless of whether you intend to store any state in your actor, you must set the `actorStateStore` property to `true` in the state store component's metadata section. Actor state is stored with a specific scheme in transactional state stores, allowing for consistent querying. Only a single state store component can be used as the state store for all actors. Read the [state API reference]({{< ref state_api.md >}}) and the [actors API reference]({{< ref actors_api.md >}}) to learn more about state stores for actors.
 ### Actor timers and reminders
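For reference, the `actorStateStore` flag described in the new paragraph lives in the state store component's metadata. A minimal sketch, assuming a Redis state store (the component name, host, and password value are illustrative, not part of this diff):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""
  # Marks this state store as the (single) actor state store
  - name: actorStateStore
    value: "true"
```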


@@ -103,11 +103,9 @@ func prodDBBackupHandler(ctx context.Context, job *common.JobEvent) error {
 	if err := json.Unmarshal(job.Data, &jobData); err != nil {
 		// ...
 	}
-	decodedPayload, err := base64.StdEncoding.DecodeString(jobData.Value)
-	// ...
 	var jobPayload api.DBBackup
-	if err := json.Unmarshal(decodedPayload, &jobPayload); err != nil {
+	if err := json.Unmarshal(job.Data, &jobPayload); err != nil {
 		// ...
 	}
 	fmt.Printf("job %d received:\n type: %v \n typeurl: %v\n value: %v\n extracted payload: %v\n", jobCount, job.JobType, jobData.TypeURL, jobData.Value, jobPayload)


@@ -106,8 +106,25 @@ Want to skip the quickstarts? Not a problem. You can try out the workflow buildi
 ## Limitations
-- **State stores:** As of the 1.12.0 beta release of Dapr Workflow, using the NoSQL databases as a state store results in limitations around storing internal states. For example, CosmosDB has a maximum single operation item limit of only 100 states in a single request.
-- **Horizontal scaling:** As of the 1.12.0 beta release of Dapr Workflow, if you scale out Dapr sidecars or your application pods to more than 2, then the concurrency of the workflow execution drops. It is recommended to test with 1 or 2 instances, and no more than 2.
+- **State stores:** Due to underlying limitations in some databases, most commonly NoSQL databases, you might run into limits on storing internal states. For example, CosmosDB has a maximum single-operation item limit of only 100 states in a single request.
+- **Horizontal scaling:** As of the 1.12.0 beta release of Dapr Workflow, it is recommended to use a maximum of two instances of Dapr per workflow application. This limitation is resolved in Dapr 1.14.x when the scheduler service is enabled.
+
+To enable the scheduler service to work for Dapr Workflows, make sure you're using Dapr 1.14.x or later and assign the following configuration to your app:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Configuration
+metadata:
+  name: schedulerconfig
+spec:
+  tracing:
+    samplingRate: "1"
+  features:
+  - name: SchedulerReminders
+    enabled: true
+```
+
+See more info about [enabling preview features]({{<ref preview-features>}}).
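The configuration above only takes effect once an application references it by name. A minimal sketch of wiring it up on Kubernetes via the standard Dapr sidecar annotations (the app ID is a hypothetical example):

```yaml
# Pod template annotations for a Dapr-enabled workflow app (illustrative)
annotations:
  dapr.io/enabled: "true"
  dapr.io/app-id: "workflow-app"
  dapr.io/config: "schedulerconfig"
```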
## Watch the demo


@@ -749,7 +749,7 @@ def status_monitor_workflow(ctx: wf.DaprWorkflowContext, job: JobStatus):
         ctx.call_activity(send_alert, input=f"Job '{job.job_id}' is unhealthy!")
         next_sleep_interval = 5  # check more frequently when unhealthy
-    yield ctx.create_timer(fire_at=ctx.current_utc_datetime + timedelta(seconds=next_sleep_interval))
+    yield ctx.create_timer(fire_at=ctx.current_utc_datetime + timedelta(minutes=next_sleep_interval))
     # restart from the beginning with a new JobStatus input
     ctx.continue_as_new(job)
@@ -896,7 +896,7 @@ func StatusMonitorWorkflow(ctx *workflow.WorkflowContext) (any, error) {
 	}
 	if status == "healthy" {
 		job.IsHealthy = true
-		sleepInterval = time.Second * 60
+		sleepInterval = time.Minute * 60
 	} else {
 		if job.IsHealthy {
 			job.IsHealthy = false
@@ -905,7 +905,7 @@ func StatusMonitorWorkflow(ctx *workflow.WorkflowContext) (any, error) {
 				return "", err
 			}
 		}
-		sleepInterval = time.Second * 5
+		sleepInterval = time.Minute * 5
 	}
 	if err := ctx.CreateTimer(sleepInterval).Await(nil); err != nil {
 		return "", err


@@ -273,23 +273,20 @@ func deleteJob(ctx context.Context, in *common.InvocationEvent) (out *common.Con
 // Handler that handles job events
 func handleJob(ctx context.Context, job *common.JobEvent) error {
-	var jobData common.Job
-	if err := json.Unmarshal(job.Data, &jobData); err != nil {
-		return fmt.Errorf("failed to unmarshal job: %v", err)
-	}
-	decodedPayload, err := base64.StdEncoding.DecodeString(jobData.Value)
-	if err != nil {
-		return fmt.Errorf("failed to decode job payload: %v", err)
-	}
-	var jobPayload JobData
-	if err := json.Unmarshal(decodedPayload, &jobPayload); err != nil {
-		return fmt.Errorf("failed to unmarshal payload: %v", err)
-	}
-	fmt.Println("Starting droid:", jobPayload.Droid)
-	fmt.Println("Executing maintenance job:", jobPayload.Task)
-	return nil
+	var jobData common.Job
+	if err := json.Unmarshal(job.Data, &jobData); err != nil {
+		return fmt.Errorf("failed to unmarshal job: %v", err)
+	}
+	var jobPayload JobData
+	if err := json.Unmarshal(job.Data, &jobPayload); err != nil {
+		return fmt.Errorf("failed to unmarshal payload: %v", err)
+	}
+	fmt.Println("Starting droid:", jobPayload.Droid)
+	fmt.Println("Executing maintenance job:", jobPayload.Task)
+	return nil
 }
 ```
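The new handler unmarshals `job.Data` directly into both `common.Job` and `JobData`. The `JobData` type itself is defined outside this hunk; a matching sketch, inferred only from the fields the handler prints:

```go
// JobData is a hypothetical payload type matching the fields used above.
type JobData struct {
	Droid string `json:"droid"`
	Task  string `json:"task"`
}
```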


@@ -83,8 +83,8 @@ The above example uses secrets as plain strings. It is recommended to use a secr
 | `maxConcurrentHandlers` | N | Defines the maximum number of concurrent message handlers. Default: `0` (unlimited) | `10`
 | `disableEntityManagement` | N | When set to true, queues and subscriptions do not get created automatically. Default: `"false"` | `"true"`, `"false"`
 | `defaultMessageTimeToLiveInSec` | N | Default message time to live, in seconds. Used during subscription creation only. | `10`
-| `autoDeleteOnIdleInSec` | N | Time in seconds to wait before auto deleting idle subscriptions. Used during subscription creation only. Default: `0` (disabled) | `3600`
-| `maxDeliveryCount` | N | Defines the number of attempts the server will make to deliver a message. Used during subscription creation only. Must be 300s or greater. Default set by server. | `10`
+| `autoDeleteOnIdleInSec` | N | Time in seconds to wait before auto deleting idle subscriptions. Used during subscription creation only. Must be 300s or greater. Default: `0` (disabled) | `3600`
+| `maxDeliveryCount` | N | Defines the number of attempts the server makes to deliver a message. Used during subscription creation only. Default set by server. | `10`
 | `lockDurationInSec` | N | Defines the length in seconds that a message will be locked for before expiring. Used during subscription creation only. Default set by server. | `30`
 | `minConnectionRecoveryInSec` | N | Minimum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. Default: `2` | `5`
 | `maxConnectionRecoveryInSec` | N | Maximum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. After each attempt, the component waits a random number of seconds, increasing every time, between the minimum and the maximum. Default: `300` (5 minutes) | `600`
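To show how these metadata entries fit together, a sketch of an Azure Service Bus topics pub/sub component using a few fields from this table (the component name and connection string are placeholders; keep real credentials in a secret store, as the page recommends):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: servicebus-pubsub
spec:
  type: pubsub.azure.servicebus.topics
  version: v1
  metadata:
  - name: connectionString
    value: "Endpoint=sb://..." # placeholder; use a secret store in production
  - name: maxDeliveryCount
    value: "10"
  - name: autoDeleteOnIdleInSec
    value: "3600"
  - name: lockDurationInSec
    value: "30"
```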