Commit

merging main
masseyke committed Oct 16, 2023
2 parents 59c345d + 8e0fd22 commit 4aa218d
Showing 211 changed files with 4,647 additions and 1,051 deletions.
2 changes: 1 addition & 1 deletion .buildkite/pipelines/periodic-platform-support.yml
@@ -2,7 +2,7 @@ steps:
- group: platform-support-unix
steps:
- label: "{{matrix.image}} / platform-support-unix"
- command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true platformSupportTests
+ command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true functionalTests
timeout_in_minutes: 420
matrix:
setup:
2 changes: 1 addition & 1 deletion .ci/jobs.t/elastic+elasticsearch+periodic+ear.yml
@@ -33,4 +33,4 @@
ln -s "$PWD" "$WORKSPACE"
- shell: |
#!/usr/local/bin/runbld --redirect-stderr
- $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true check
+ $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true functionalTests
@@ -13,7 +13,8 @@ import java.nio.file.Files
String buildNumber = System.getenv('BUILD_NUMBER') ?: System.getenv('BUILDKITE_BUILD_NUMBER')
String performanceTest = System.getenv('BUILD_PERFORMANCE_TEST')
if (buildNumber && performanceTest == null && GradleUtils.isIncludedBuild(project) == false) {
- File uploadFile = file("build/${buildNumber}.tar.bz2")
+ def uploadFilePath = "build/${buildNumber}.tar.bz2"
+ File uploadFile = file(uploadFilePath)
project.gradle.buildFinished { result ->
println "build complete, generating: $uploadFile"
if (uploadFile.exists()) {
@@ -64,5 +65,27 @@ if (buildNumber && performanceTest == null && GradleUtils.isIncludedBuild(project) == false) {
} catch (Exception e) {
logger.lifecycle("Failed to archive additional logs", e)
}

+ if (uploadFile.exists() && System.getenv("BUILDKITE") == "true") {
+ try {
+ println "Uploading buildkite artifact: ${uploadFilePath}..."
+ new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath)
+ .start()
+ .waitFor()
+
+ println "Generating buildscan link for artifact..."
+
+ def process = new ProcessBuilder("buildkite-agent", "artifact", "search", uploadFilePath, "--step", System.getenv('BUILDKITE_JOB_ID'), "--format", "%i").start()
+ process.waitFor()
+ def artifactUuid = (process.text ?: "").trim()
+
+ println "Artifact UUID: ${artifactUuid}"
+ if (artifactUuid) {
+ buildScan.link 'Artifact Upload', "https://buildkite.com/organizations/elastic/pipelines/${System.getenv('BUILDKITE_PIPELINE_SLUG')}/builds/${buildNumber}/jobs/${System.getenv('BUILDKITE_JOB_ID')}/artifacts/${artifactUuid}"
+ }
+ } catch (Exception e) {
+ logger.lifecycle("Failed to upload buildkite artifact", e)
+ }
+ }
}
}
@@ -98,10 +98,17 @@ buildScan {
def branch = System.getenv('BUILDKITE_BRANCH')
def repoMatcher = System.getenv('BUILDKITE_REPO') =~ /(https:\/\/github\.com\/|git@github\.com:)(\S+)\.git/
def repository = repoMatcher.matches() ? repoMatcher.group(2) : "<unknown>"
+ def jobName = (System.getenv('BUILDKITE_LABEL') ?: '').replaceAll(/[^a-zA-Z0-9_\-]+/, '_').toLowerCase()

tag 'CI'
link 'CI Build', buildKiteUrl
value 'Job Number', System.getenv('BUILDKITE_BUILD_NUMBER')

+ value 'Pipeline', System.getenv('BUILDKITE_PIPELINE_SLUG')
+ tag System.getenv('BUILDKITE_PIPELINE_SLUG')

+ value 'Job Name', jobName
+ tag jobName

// Add SCM information
def prId = System.getenv('BUILDKITE_PULL_REQUEST')
4 changes: 2 additions & 2 deletions build.gradle
@@ -186,7 +186,7 @@ if (bwc_tests_enabled == false) {
println "See ${bwc_tests_disabled_issue}"
println "==========================================================="
}
- if (project.gradle.startParameter.taskNames.any { it.startsWith("checkPart") || it == 'platformSupportTests' }) {
+ if (project.gradle.startParameter.taskNames.any { it.startsWith("checkPart") || it == 'functionalTests' }) {
// Disable BWC tests for checkPart* tasks and platform support tests as it's expected that this will run on its own check
bwc_tests_enabled = false
}
@@ -256,7 +256,7 @@ allprojects {
tasks.register('checkPart1') { dependsOn 'check' }
}

- tasks.register('platformSupportTests') { dependsOn 'check'}
+ tasks.register('functionalTests') { dependsOn 'check'}
}

/*
9 changes: 9 additions & 0 deletions docs/changelog/100033.yaml
@@ -0,0 +1,9 @@
pr: 100033
summary: "[Behavioral Analytics] Analytics collections use Data Stream Lifecycle (DSL)\
\ instead of Index Lifecycle Management (ILM) for data retention management. Behavioral\
\ analytics has traditionally used ILM to manage data retention. Starting with 8.12.0,\
\ this will change. Analytics collections created prior to 8.12.0 will continue to use\
\ their existing ILM policies, but new analytics collections will be managed using DSL."
area: Application
type: feature
issues: [ ]
5 changes: 5 additions & 0 deletions docs/changelog/100383.yaml
@@ -0,0 +1,5 @@
pr: 100383
summary: Push s3 requests count via metrics API
area: Distributed
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/100466.yaml
@@ -0,0 +1,5 @@
pr: 100466
summary: "Introduce includeShardsStats in the stats request to indicate that we only fetch a summary"
area: Stats
type: enhancement
issues: [99744]
6 changes: 6 additions & 0 deletions docs/changelog/100492.yaml
@@ -0,0 +1,6 @@
pr: 100492
summary: Add runtime field of type `geo_shape`
area: Geo
type: enhancement
issues:
- 61299
5 changes: 5 additions & 0 deletions docs/changelog/100594.yaml
@@ -0,0 +1,5 @@
pr: 100594
summary: Grant editor and viewer access to profiling
area: Authorization
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/100646.yaml
@@ -0,0 +1,5 @@
pr: 100646
summary: Support complex datemath expressions in index and index alias names
area: Search
type: bug
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/100766.yaml
@@ -0,0 +1,6 @@
pr: 100766
summary: "ESQL: Properly handle multi-values in fold() and date math"
area: ES|QL
type: bug
issues:
- 100497
6 changes: 6 additions & 0 deletions docs/changelog/100779.yaml
@@ -0,0 +1,6 @@
pr: 100779
summary: Fix NullPointerException in RotableSecret
area: Security
type: bug
issues:
- 99759
5 changes: 5 additions & 0 deletions docs/changelog/100828.yaml
@@ -0,0 +1,5 @@
pr: 100828
summary: Consider task cancelled exceptions as recoverable
area: Transform
type: bug
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/100846.yaml
@@ -0,0 +1,6 @@
pr: 100846
summary: Consistent scores for multi-term `SourceConfirmedTextQuery`
area: Search
type: bug
issues:
- 98712
5 changes: 5 additions & 0 deletions docs/changelog/100862.yaml
@@ -0,0 +1,5 @@
pr: 100862
summary: Sending an index name to `DocumentParsingObserver` that is never null
area: Ingest Node
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/100886.yaml
@@ -0,0 +1,5 @@
pr: 100886
summary: Use the correct writable name for model assignment metadata in mixed version clusters. Prevents a node failure due to IllegalArgumentException Unknown NamedWriteable [trained_model_assignment]
area: Machine Learning
type: bug
issues: []
7 changes: 7 additions & 0 deletions docs/reference/esql/functions/starts_with.asciidoc
@@ -1,5 +1,8 @@
[[esql-starts_with]]
=== `STARTS_WITH`
+ [.text-center]
+ image::esql/functions/signature/starts_with.svg[Embedded,opts=inline]
+
Returns a boolean that indicates whether a keyword string starts with another
string:

@@ -11,3 +14,7 @@ include::{esql-specs}/docs.csv-spec[tag=startsWith]
|===
include::{esql-specs}/docs.csv-spec[tag=startsWith-result]
|===

+ Supported types:
+
+ include::types/starts_with.asciidoc[]
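The csv-spec includes above carry the canonical examples. As a quick orientation, a minimal sketch of calling the same function through the ES|QL `_query` REST endpoint might look like the following; the row and column names are made up for illustration and are not part of this change:

POST /_query
{
  "query": "ROW name = \"Elasticsearch\" | EVAL starts = STARTS_WITH(name, \"Elastic\") | KEEP name, starts"
}

Here `starts` comes back as `true`, since the string begins with the given prefix.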
7 changes: 7 additions & 0 deletions docs/reference/esql/functions/trim.asciidoc
@@ -1,5 +1,8 @@
[[esql-trim]]
=== `TRIM`
+ [.text-center]
+ image::esql/functions/signature/trim.svg[Embedded,opts=inline]
+
Removes leading and trailing whitespaces from strings.

[source.merge.styled,esql]
@@ -10,3 +13,7 @@ include::{esql-specs}/string.csv-spec[tag=trim]
|===
include::{esql-specs}/string.csv-spec[tag=trim-result]
|===

+ Supported types:
+
+ include::types/trim.asciidoc[]
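As with `STARTS_WITH` above, a minimal sketch of exercising `TRIM` through the `_query` endpoint, with illustrative values only:

POST /_query
{
  "query": "ROW message = \"   some text   \" | EVAL trimmed = TRIM(message)"
}

`trimmed` is returned as `"some text"`, with the leading and trailing whitespace removed.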
8 changes: 4 additions & 4 deletions docs/reference/ilm/actions/ilm-rollover.asciidoc
Original file line number Diff line number Diff line change
@@ -129,10 +129,10 @@ opt in to rolling over empty indices, by adding a `"min_docs": 0` condition. This can also be
disabled on a cluster-wide basis by setting `indices.lifecycle.rollover.only_if_has_documents` to
`false`.

- NOTE: The rollover action implicitly always rolls over a data stream or alias if one or more shards contain
- 200000000 or more documents. Normally a shard will reach 50GB long before it reaches 200M documents,
- but this isn't the case for space-efficient data sets. Search performance will very likely suffer
- if a shard contains more than 200M documents. This is the reason for the built-in limit.
+ IMPORTANT: The rollover action implicitly always rolls over a data stream or alias if one or more shards contain
+ 200000000 or more documents. Normally a shard will reach 50GB long before it reaches 200M documents,
+ but this isn't the case for space-efficient data sets. Search performance will very likely suffer
+ if a shard contains more than 200M documents. This is the reason for the built-in limit.

[[ilm-rollover-ex]]
==== Example
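For context on the `"min_docs": 0` opt-in referenced at the top of this hunk: a rollover action that also rolls over empty indices might look like the following sketch. The policy name and `max_age` threshold are illustrative and not taken from this commit:

PUT _ilm/policy/my-rollover-policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "1d",
            "min_docs": 0
          }
        }
      }
    }
  }
}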
12 changes: 12 additions & 0 deletions docs/reference/ilm/index-rollover.asciidoc
@@ -51,3 +51,15 @@ TIP: Rolling over to a new index based on size, document count, or age is preferable
to time-based rollovers. Rolling over at an arbitrary time often results in
many small indices, which can have a negative impact on performance and
resource usage.

+ IMPORTANT: Empty indices will not be rolled over, even if they have an associated `max_age` that
+ would otherwise result in a rollover occurring. A policy can override this behavior, and explicitly
+ opt in to rolling over empty indices, by adding a `"min_docs": 0` condition. This can also be
+ disabled on a cluster-wide basis by setting `indices.lifecycle.rollover.only_if_has_documents` to
+ `false`.
+
+ IMPORTANT: The rollover action implicitly always rolls over a data stream or alias if one or more shards contain
+ 200000000 or more documents. Normally a shard will reach 50GB long before it reaches 200M documents,
+ but this isn't the case for space-efficient data sets. Search performance will very likely suffer
+ if a shard contains more than 200M documents. This is the reason for the built-in limit.
+
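The first note above also mentions a cluster-wide switch. A minimal sketch of disabling the empty-index guard for the whole cluster via a persistent setting update; this request is illustrative and is not part of the diff:

PUT _cluster/settings
{
  "persistent": {
    "indices.lifecycle.rollover.only_if_has_documents": false
  }
}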
5 changes: 5 additions & 0 deletions docs/reference/ilm/set-up-lifecycle-policy.asciidoc
@@ -68,6 +68,11 @@ PUT _ilm/policy/my_policy
<2> Delete the index 30 days after rollover
====

+ IMPORTANT: The rollover action implicitly always rolls over a data stream or alias if one or more shards contain
+ 200000000 or more documents. Normally a shard will reach 25GB long before it reaches 200M documents,
+ but this isn't the case for space-efficient data sets. Search performance will very likely suffer
+ if a shard contains more than 200M documents. This is the reason for the built-in limit.
+
[discrete]
[[apply-policy-template]]
=== Apply lifecycle policy with an index template
16 changes: 8 additions & 8 deletions docs/reference/query-rules/apis/get-query-ruleset.asciidoc
@@ -52,9 +52,9 @@ PUT _query_rules/my-ruleset
"type": "pinned",
"criteria": [
{
- "type": "exact",
+ "type": "contains",
"metadata": "query_string",
- "values": [ "marvel" ]
+ "values": [ "pugs", "puggles" ]
}
],
"actions": {
@@ -69,9 +69,9 @@
"type": "pinned",
"criteria": [
{
- "type": "exact",
+ "type": "fuzzy",
"metadata": "query_string",
- "values": [ "dc" ]
+ "values": [ "rescue dogs" ]
}
],
"actions": {
@@ -117,9 +117,9 @@ A sample response:
"type": "pinned",
"criteria": [
{
- "type": "exact",
+ "type": "contains",
"metadata": "query_string",
- "values": [ "marvel" ]
+ "values": [ "pugs", "puggles" ]
}
],
"actions": {
@@ -134,9 +134,9 @@ A sample response:
"type": "pinned",
"criteria": [
{
- "type": "exact",
+ "type": "fuzzy",
"metadata": "query_string",
- "values": [ "dc" ]
+ "values": [ "rescue dogs" ]
}
],
"actions": {
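The updated example rules match on `contains` and `fuzzy` criteria rather than `exact`. A rough sketch of exercising the ruleset at search time, assuming the `rule_query` syntax documented for query rules in this release; the index name and organic query are illustrative only:

GET my-index/_search
{
  "query": {
    "rule_query": {
      "organic": {
        "match": { "description": "puggles" }
      },
      "match_criteria": {
        "query_string": "puggles and other dogs"
      },
      "ruleset_id": "my-ruleset"
    }
  }
}

Because the first rule's `contains` criterion matches any `query_string` value containing "pugs" or "puggles", that rule's pinned documents are promoted ahead of the organic results.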