Class GPUOptions.Experimental.Builder

java.lang.Object
  com.google.protobuf.AbstractMessageLite.Builder
    com.google.protobuf.AbstractMessage.Builder<GPUOptions.Experimental.Builder>
      com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
        org.tensorflow.framework.GPUOptions.Experimental.Builder
All Implemented Interfaces:
com.google.protobuf.Message.Builder, com.google.protobuf.MessageLite.Builder, com.google.protobuf.MessageLiteOrBuilder, com.google.protobuf.MessageOrBuilder, Cloneable, GPUOptions.ExperimentalOrBuilder
Enclosing class:
GPUOptions.Experimental

public static final class GPUOptions.Experimental.Builder extends com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder> implements GPUOptions.ExperimentalOrBuilder
Protobuf type tensorflow.GPUOptions.Experimental
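A minimal usage sketch for orientation (not part of the generated docs; it assumes the org.tensorflow.framework bindings and uses illustrative field values):

    import org.tensorflow.framework.ConfigProto;
    import org.tensorflow.framework.GPUOptions;

    public class GpuOptionsBuilderDemo {
        public static void main(String[] args) {
            // Build the Experimental message with the fluent builder, then
            // attach it to GPUOptions and a session-level ConfigProto.
            GPUOptions.Experimental experimental =
                GPUOptions.Experimental.newBuilder()
                    .setUseUnifiedMemory(false)
                    .setNumDevToDevCopyStreams(2)
                    .build();

            GPUOptions gpuOptions = GPUOptions.newBuilder()
                .setExperimental(experimental)
                .build();

            ConfigProto config = ConfigProto.newBuilder()
                .setGpuOptions(gpuOptions)
                .build();

            System.out.println(config);  // text-format dump of the options
        }
    }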
  • Method Details

    • getDescriptor

      public static final com.google.protobuf.Descriptors.Descriptor getDescriptor()
    • internalGetFieldAccessorTable

      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
      Specified by:
      internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • clear

      public GPUOptions.Experimental.Builder clear()
      Specified by:
      clear in interface com.google.protobuf.Message.Builder
      Specified by:
      clear in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      clear in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • getDescriptorForType

      public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()
      Specified by:
      getDescriptorForType in interface com.google.protobuf.Message.Builder
      Specified by:
      getDescriptorForType in interface com.google.protobuf.MessageOrBuilder
      Overrides:
      getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • getDefaultInstanceForType

      public GPUOptions.Experimental getDefaultInstanceForType()
      Specified by:
      getDefaultInstanceForType in interface com.google.protobuf.MessageLiteOrBuilder
      Specified by:
      getDefaultInstanceForType in interface com.google.protobuf.MessageOrBuilder
    • build

      public GPUOptions.Experimental build()
      Specified by:
      build in interface com.google.protobuf.Message.Builder
      Specified by:
      build in interface com.google.protobuf.MessageLite.Builder
    • buildPartial

      public GPUOptions.Experimental buildPartial()
      Specified by:
      buildPartial in interface com.google.protobuf.Message.Builder
      Specified by:
      buildPartial in interface com.google.protobuf.MessageLite.Builder
    • clone

      public GPUOptions.Experimental.Builder clone()
      Specified by:
      clone in interface com.google.protobuf.Message.Builder
      Specified by:
      clone in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      clone in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • setField

      public GPUOptions.Experimental.Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
      Specified by:
      setField in interface com.google.protobuf.Message.Builder
      Overrides:
      setField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • clearField

      public GPUOptions.Experimental.Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field)
      Specified by:
      clearField in interface com.google.protobuf.Message.Builder
      Overrides:
      clearField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • clearOneof

      public GPUOptions.Experimental.Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof)
      Specified by:
      clearOneof in interface com.google.protobuf.Message.Builder
      Overrides:
      clearOneof in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • setRepeatedField

      public GPUOptions.Experimental.Builder setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value)
      Specified by:
      setRepeatedField in interface com.google.protobuf.Message.Builder
      Overrides:
      setRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • addRepeatedField

      public GPUOptions.Experimental.Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
      Specified by:
      addRepeatedField in interface com.google.protobuf.Message.Builder
      Overrides:
      addRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • mergeFrom

      public GPUOptions.Experimental.Builder mergeFrom(com.google.protobuf.Message other)
      Specified by:
      mergeFrom in interface com.google.protobuf.Message.Builder
      Overrides:
      mergeFrom in class com.google.protobuf.AbstractMessage.Builder<GPUOptions.Experimental.Builder>
    • mergeFrom

      public GPUOptions.Experimental.Builder mergeFrom(GPUOptions.Experimental other)
    • isInitialized

      public final boolean isInitialized()
      Specified by:
      isInitialized in interface com.google.protobuf.MessageLiteOrBuilder
      Overrides:
      isInitialized in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • mergeFrom

      public GPUOptions.Experimental.Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException
      Specified by:
      mergeFrom in interface com.google.protobuf.Message.Builder
      Specified by:
      mergeFrom in interface com.google.protobuf.MessageLite.Builder
      Overrides:
      mergeFrom in class com.google.protobuf.AbstractMessage.Builder<GPUOptions.Experimental.Builder>
      Throws:
      IOException
    • getVirtualDevicesList

      public List<GPUOptions.Experimental.VirtualDevices> getVirtualDevicesList()
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
      Specified by:
      getVirtualDevicesList in interface GPUOptions.ExperimentalOrBuilder
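      As a concrete sketch, the first documented example above can be written
      against this builder as follows (an assumption-laden illustration: the
      memory limits go through the VirtualDevices field memory_limit_mb, in
      megabytes, so 1GB is written as 1024):

          // Two visible GPUs ("1,0"), each split into two virtual devices.
          GPUOptions gpuOptions = GPUOptions.newBuilder()
              .setVisibleDeviceList("1,0")
              .setExperimental(GPUOptions.Experimental.newBuilder()
                  .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()
                      .addMemoryLimitMb(1024)    // /device:GPU:0 -> visible GPU 1, 1GB
                      .addMemoryLimitMb(2048))   // /device:GPU:1 -> visible GPU 1, 2GB
                  .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()
                      .addMemoryLimitMb(3072)    // /device:GPU:2 -> visible GPU 0, 3GB
                      .addMemoryLimitMb(4096)))  // /device:GPU:3 -> visible GPU 0, 4GB
              .build();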
    • getVirtualDevicesCount

      public int getVirtualDevicesCount()
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
      Specified by:
      getVirtualDevicesCount in interface GPUOptions.ExperimentalOrBuilder
    • getVirtualDevices

      public GPUOptions.Experimental.VirtualDevices getVirtualDevices(int index)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
      Specified by:
      getVirtualDevices in interface GPUOptions.ExperimentalOrBuilder
    • setVirtualDevices

      public GPUOptions.Experimental.Builder setVirtualDevices(int index, GPUOptions.Experimental.VirtualDevices value)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • setVirtualDevices

      public GPUOptions.Experimental.Builder setVirtualDevices(int index, GPUOptions.Experimental.VirtualDevices.Builder builderForValue)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • addVirtualDevices

      public GPUOptions.Experimental.Builder addVirtualDevices(GPUOptions.Experimental.VirtualDevices value)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • addVirtualDevices

      public GPUOptions.Experimental.Builder addVirtualDevices(int index, GPUOptions.Experimental.VirtualDevices value)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • addVirtualDevices

      public GPUOptions.Experimental.Builder addVirtualDevices(GPUOptions.Experimental.VirtualDevices.Builder builderForValue)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • addVirtualDevices

      public GPUOptions.Experimental.Builder addVirtualDevices(int index, GPUOptions.Experimental.VirtualDevices.Builder builderForValue)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • addAllVirtualDevices

      public GPUOptions.Experimental.Builder addAllVirtualDevices(Iterable<? extends GPUOptions.Experimental.VirtualDevices> values)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • clearVirtualDevices

      public GPUOptions.Experimental.Builder clearVirtualDevices()
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • removeVirtualDevices

      public GPUOptions.Experimental.Builder removeVirtualDevices(int index)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • getVirtualDevicesBuilder

      public GPUOptions.Experimental.VirtualDevices.Builder getVirtualDevicesBuilder(int index)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • getVirtualDevicesOrBuilder

      public GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(int index)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
      Specified by:
      getVirtualDevicesOrBuilder in interface GPUOptions.ExperimentalOrBuilder
    • getVirtualDevicesOrBuilderList

      public List<? extends GPUOptions.Experimental.VirtualDevicesOrBuilder> getVirtualDevicesOrBuilderList()
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
      Specified by:
      getVirtualDevicesOrBuilderList in interface GPUOptions.ExperimentalOrBuilder
    • addVirtualDevicesBuilder

      public GPUOptions.Experimental.VirtualDevices.Builder addVirtualDevicesBuilder()
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • addVirtualDevicesBuilder

      public GPUOptions.Experimental.VirtualDevices.Builder addVirtualDevicesBuilder(int index)
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • getVirtualDevicesBuilderList

      public List<GPUOptions.Experimental.VirtualDevices.Builder> getVirtualDevicesBuilderList()
       The multi virtual device settings. If empty (not set), it will create a
       single virtual device on each visible GPU, according to the settings
       in "visible_device_list" above. Otherwise, the number of elements in the
       list must be the same as the number of visible GPUs (after
       "visible_device_list" filtering if it is set), and the string-represented
       device names (e.g. /device:GPU:<id>) will refer to the virtual
       devices and have the <id> field assigned sequentially starting from 0,
       according to the order of the virtual devices determined by
       device_ordinal and the location in the virtual device list.
      
       For example,
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB }
         virtual_devices { memory_limit: 3GB memory_limit: 4GB }
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory
         /device:GPU:1 -> visible GPU 1 with 2GB memory
         /device:GPU:2 -> visible GPU 0 with 3GB memory
         /device:GPU:3 -> visible GPU 0 with 4GB memory
      
       but
         visible_device_list = "1,0"
         virtual_devices { memory_limit: 1GB memory_limit: 2GB
                           device_ordinal: 10 device_ordinal: 20}
         virtual_devices { memory_limit: 3GB memory_limit: 4GB
                           device_ordinal: 10 device_ordinal: 20}
       will create 4 virtual devices as:
         /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
         /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
         /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
         /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
      
       NOTE:
       1. It's invalid to set both this and "per_process_gpu_memory_fraction"
          at the same time.
       2. Currently this setting is per-process, not per-session. Using
          different settings in different sessions within the same process will
          result in undefined behavior.
       
      repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    • getNumVirtualDevicesPerGpu

      public int getNumVirtualDevicesPerGpu()
       The number of virtual devices to create on each visible GPU. The
       available memory will be split equally among all virtual devices. If the
       field `memory_limit_mb` in `VirtualDevices` is not empty, this field will
       be ignored.
       
      int32 num_virtual_devices_per_gpu = 15;
      Specified by:
      getNumVirtualDevicesPerGpu in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The numVirtualDevicesPerGpu.
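      For example, to split each visible GPU into two equal-memory virtual
      devices without listing explicit limits (a sketch using only methods
      documented in this section):

          GPUOptions.Experimental experimental = GPUOptions.Experimental.newBuilder()
              .setNumVirtualDevicesPerGpu(2)  // ignored if memory_limit_mb is set
              .build();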
    • setNumVirtualDevicesPerGpu

      public GPUOptions.Experimental.Builder setNumVirtualDevicesPerGpu(int value)
       The number of virtual devices to create on each visible GPU. The
       available memory will be split equally among all virtual devices. If the
       field `memory_limit_mb` in `VirtualDevices` is not empty, this field will
       be ignored.
       
      int32 num_virtual_devices_per_gpu = 15;
      Parameters:
      value - The numVirtualDevicesPerGpu to set.
      Returns:
      This builder for chaining.
    • clearNumVirtualDevicesPerGpu

      public GPUOptions.Experimental.Builder clearNumVirtualDevicesPerGpu()
       The number of virtual devices to create on each visible GPU. The
       available memory will be split equally among all virtual devices. If the
       field `memory_limit_mb` in `VirtualDevices` is not empty, this field will
       be ignored.
       
      int32 num_virtual_devices_per_gpu = 15;
      Returns:
      This builder for chaining.
    • getUseUnifiedMemory

      public boolean getUseUnifiedMemory()
       If true, uses CUDA unified memory for memory allocations. If
       per_process_gpu_memory_fraction option is greater than 1.0, then unified
       memory is used regardless of the value for this field. See comments for
       per_process_gpu_memory_fraction field for more details and requirements
       of the unified memory. This option is useful for oversubscribing memory
       when multiple processes share a single GPU, each using less than a 1.0
       per-process memory fraction.
       
      bool use_unified_memory = 2;
      Specified by:
      getUseUnifiedMemory in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The useUnifiedMemory.
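      A sketch of the oversubscription case described above, combining this
      flag with per_process_gpu_memory_fraction on the enclosing GPUOptions
      (field values are illustrative):

          // Request 1.5x the physical GPU memory; a fraction > 1.0 already
          // forces unified memory, so the explicit flag is shown for clarity.
          GPUOptions gpuOptions = GPUOptions.newBuilder()
              .setPerProcessGpuMemoryFraction(1.5)
              .setExperimental(GPUOptions.Experimental.newBuilder()
                  .setUseUnifiedMemory(true))
              .build();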
    • setUseUnifiedMemory

      public GPUOptions.Experimental.Builder setUseUnifiedMemory(boolean value)
       If true, uses CUDA unified memory for memory allocations. If
       per_process_gpu_memory_fraction option is greater than 1.0, then unified
       memory is used regardless of the value for this field. See comments for
       per_process_gpu_memory_fraction field for more details and requirements
       of the unified memory. This option is useful for oversubscribing memory
       when multiple processes share a single GPU, each using less than a 1.0
       per-process memory fraction.
       
      bool use_unified_memory = 2;
      Parameters:
      value - The useUnifiedMemory to set.
      Returns:
      This builder for chaining.
    • clearUseUnifiedMemory

      public GPUOptions.Experimental.Builder clearUseUnifiedMemory()
       If true, uses CUDA unified memory for memory allocations. If
       per_process_gpu_memory_fraction option is greater than 1.0, then unified
       memory is used regardless of the value for this field. See comments for
       per_process_gpu_memory_fraction field for more details and requirements
       of the unified memory. This option is useful for oversubscribing memory
       when multiple processes share a single GPU, each using less than a 1.0
       per-process memory fraction.
       
      bool use_unified_memory = 2;
      Returns:
      This builder for chaining.
    • getNumDevToDevCopyStreams

      public int getNumDevToDevCopyStreams()
       If > 1, the number of device-to-device copy streams to create
       for each GPUDevice.  Default value is 0, which is automatically
       converted to 1.
       
      int32 num_dev_to_dev_copy_streams = 3;
      Specified by:
      getNumDevToDevCopyStreams in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The numDevToDevCopyStreams.
    • setNumDevToDevCopyStreams

      public GPUOptions.Experimental.Builder setNumDevToDevCopyStreams(int value)
       If > 1, the number of device-to-device copy streams to create
       for each GPUDevice.  Default value is 0, which is automatically
       converted to 1.
       
      int32 num_dev_to_dev_copy_streams = 3;
      Parameters:
      value - The numDevToDevCopyStreams to set.
      Returns:
      This builder for chaining.
    • clearNumDevToDevCopyStreams

      public GPUOptions.Experimental.Builder clearNumDevToDevCopyStreams()
       If > 1, the number of device-to-device copy streams to create
       for each GPUDevice.  Default value is 0, which is automatically
       converted to 1.
       
      int32 num_dev_to_dev_copy_streams = 3;
      Returns:
      This builder for chaining.
    • getCollectiveRingOrder

      public String getCollectiveRingOrder()
       If non-empty, defines a good GPU ring order on a single worker based on
       device interconnect.  This assumes that all workers have the same GPU
       topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       This ring order is used by the RingReducer implementation of
       CollectiveReduce, and serves as an override to automatic ring order
       generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       
      string collective_ring_order = 4;
      Specified by:
      getCollectiveRingOrder in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The collectiveRingOrder.
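      For instance, pinning the ring order for an 8-GPU worker (a sketch; the
      right ordering depends on the machine's interconnect topology):

          GPUOptions.Experimental experimental = GPUOptions.Experimental.newBuilder()
              .setCollectiveRingOrder("3,2,1,0,7,6,5,4")
              .build();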
    • getCollectiveRingOrderBytes

      public com.google.protobuf.ByteString getCollectiveRingOrderBytes()
       If non-empty, defines a good GPU ring order on a single worker based on
       device interconnect.  This assumes that all workers have the same GPU
       topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       This ring order is used by the RingReducer implementation of
       CollectiveReduce, and serves as an override to automatic ring order
       generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       
      string collective_ring_order = 4;
      Specified by:
      getCollectiveRingOrderBytes in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The bytes for collectiveRingOrder.
    • setCollectiveRingOrder

      public GPUOptions.Experimental.Builder setCollectiveRingOrder(String value)
       If non-empty, defines a good GPU ring order on a single worker based on
       device interconnect.  This assumes that all workers have the same GPU
       topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       This ring order is used by the RingReducer implementation of
       CollectiveReduce, and serves as an override to automatic ring order
       generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       
      string collective_ring_order = 4;
      Parameters:
      value - The collectiveRingOrder to set.
      Returns:
      This builder for chaining.
    • clearCollectiveRingOrder

      public GPUOptions.Experimental.Builder clearCollectiveRingOrder()
       If non-empty, defines a good GPU ring order on a single worker based on
       device interconnect.  This assumes that all workers have the same GPU
       topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       This ring order is used by the RingReducer implementation of
       CollectiveReduce, and serves as an override to automatic ring order
       generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       
      string collective_ring_order = 4;
      Returns:
      This builder for chaining.
    • setCollectiveRingOrderBytes

      public GPUOptions.Experimental.Builder setCollectiveRingOrderBytes(com.google.protobuf.ByteString value)
       If non-empty, defines a good GPU ring order on a single worker based on
       device interconnect.  This assumes that all workers have the same GPU
       topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       This ring order is used by the RingReducer implementation of
       CollectiveReduce, and serves as an override to automatic ring order
       generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       
      string collective_ring_order = 4;
      Parameters:
      value - The bytes for collectiveRingOrder to set.
      Returns:
      This builder for chaining.
    • getTimestampedAllocator

      public boolean getTimestampedAllocator()
       If true, extra work is done by GPUDevice and GPUBFCAllocator to
       keep track of when GPU memory is freed and when kernels actually
       complete so that we can know when a nominally free memory chunk
       is really not subject to pending use.
       
      bool timestamped_allocator = 5;
      Specified by:
      getTimestampedAllocator in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The timestampedAllocator.
    • setTimestampedAllocator

      public GPUOptions.Experimental.Builder setTimestampedAllocator(boolean value)
       If true, extra work is done by GPUDevice and GPUBFCAllocator to
       keep track of when GPU memory is freed and when kernels actually
       complete so that we can know when a nominally free memory chunk
       is really not subject to pending use.
       
      bool timestamped_allocator = 5;
      Parameters:
      value - The timestampedAllocator to set.
      Returns:
      This builder for chaining.
    • clearTimestampedAllocator

      public GPUOptions.Experimental.Builder clearTimestampedAllocator()
       If true then extra work is done by GPUDevice and GPUBFCAllocator to
       keep track of when GPU memory is freed and when kernels actually
       complete so that we can know when a nominally free memory chunk
       is really not subject to pending use.
       
      bool timestamped_allocator = 5;
      Returns:
      This builder for chaining.
    • getKernelTrackerMaxInterval

      public int getKernelTrackerMaxInterval()
       Parameters for GPUKernelTracker.  By default no kernel tracking is done.
       Note that timestamped_allocator is only effective if some tracking is
       specified.
      
       If kernel_tracker_max_interval = n > 0, then a tracking event
       is inserted after every n kernels without an event.
       
      int32 kernel_tracker_max_interval = 7;
      Specified by:
      getKernelTrackerMaxInterval in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The kernelTrackerMaxInterval.
    • setKernelTrackerMaxInterval

      public GPUOptions.Experimental.Builder setKernelTrackerMaxInterval(int value)
       Parameters for GPUKernelTracker.  By default no kernel tracking is done.
       Note that timestamped_allocator is only effective if some tracking is
       specified.
      
       If kernel_tracker_max_interval = n > 0, then a tracking event
       is inserted after every n kernels without an event.
       
      int32 kernel_tracker_max_interval = 7;
      Parameters:
      value - The kernelTrackerMaxInterval to set.
      Returns:
      This builder for chaining.
    • clearKernelTrackerMaxInterval

      public GPUOptions.Experimental.Builder clearKernelTrackerMaxInterval()
       Parameters for GPUKernelTracker.  By default no kernel tracking is done.
       Note that timestamped_allocator is only effective if some tracking is
       specified.
      
       If kernel_tracker_max_interval = n > 0, then a tracking event
       is inserted after every n kernels without an event.
       
      int32 kernel_tracker_max_interval = 7;
      Returns:
      This builder for chaining.
    • getKernelTrackerMaxBytes

      public int getKernelTrackerMaxBytes()
       If kernel_tracker_max_bytes = n > 0, then a tracking event is
       inserted after every series of kernels allocating a sum of
       memory >= n.  If one kernel allocates b * n bytes, then one
       event will be inserted after it, but it will count as b against
       the pending limit.
       
      int32 kernel_tracker_max_bytes = 8;
      Specified by:
      getKernelTrackerMaxBytes in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The kernelTrackerMaxBytes.
    • setKernelTrackerMaxBytes

      public GPUOptions.Experimental.Builder setKernelTrackerMaxBytes(int value)
       If kernel_tracker_max_bytes = n > 0, then a tracking event is
       inserted after every series of kernels allocating a sum of
       memory >= n.  If one kernel allocates b * n bytes, then one
       event will be inserted after it, but it will count as b against
       the pending limit.
       
      int32 kernel_tracker_max_bytes = 8;
      Parameters:
      value - The kernelTrackerMaxBytes to set.
      Returns:
      This builder for chaining.
    • clearKernelTrackerMaxBytes

      public GPUOptions.Experimental.Builder clearKernelTrackerMaxBytes()
       If kernel_tracker_max_bytes = n > 0, then a tracking event is
       inserted after every series of kernels allocating a sum of
       memory >= n.  If one kernel allocates b * n bytes, then one
       event will be inserted after it, but it will count as b against
       the pending limit.
       
      int32 kernel_tracker_max_bytes = 8;
      Returns:
      This builder for chaining.
    • getKernelTrackerMaxPending

      public int getKernelTrackerMaxPending()
       If kernel_tracker_max_pending > 0 then no more than this many
       tracking events can be outstanding at a time.  An attempt to
       launch an additional kernel will stall until an event
       completes.
       
      int32 kernel_tracker_max_pending = 9;
      Specified by:
      getKernelTrackerMaxPending in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The kernelTrackerMaxPending.
    • setKernelTrackerMaxPending

      public GPUOptions.Experimental.Builder setKernelTrackerMaxPending(int value)
       If kernel_tracker_max_pending > 0 then no more than this many
       tracking events can be outstanding at a time.  An attempt to
       launch an additional kernel will stall until an event
       completes.
       
      int32 kernel_tracker_max_pending = 9;
      Parameters:
      value - The kernelTrackerMaxPending to set.
      Returns:
      This builder for chaining.
    • clearKernelTrackerMaxPending

      public GPUOptions.Experimental.Builder clearKernelTrackerMaxPending()
       If kernel_tracker_max_pending > 0 then no more than this many
       tracking events can be outstanding at a time.  An attempt to
       launch an additional kernel will stall until an event
       completes.
       
      int32 kernel_tracker_max_pending = 9;
      Returns:
      This builder for chaining.
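       Example (a minimal sketch; the interval, byte, and pending values are
       illustrative assumptions, not recommended settings). As the field comments
       note, timestamped_allocator only takes effect when some tracking is enabled:

         GPUOptions.Experimental exp =
             GPUOptions.Experimental.newBuilder()
                 .setTimestampedAllocator(true)       // needs the tracking below
                 .setKernelTrackerMaxInterval(16)     // event after every 16 kernels
                 .setKernelTrackerMaxBytes(1 << 20)   // or after >= 1 MiB allocated
                 .setKernelTrackerMaxPending(4)       // at most 4 events outstanding
                 .build();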
    • getInternalFragmentationFraction

      public double getInternalFragmentationFraction()
       BFC Allocator can return an allocated chunk of memory up to 2x the
       requested size. For virtual devices with tight memory constraints and
       proportionately large allocation requests, this can lead to a significant
       reduction in available memory. This threshold controls when a chunk
       should be split if its size exceeds the requested memory size. It is
       expressed as a fraction of the total memory available to the TF device. For
       example, setting it to 0.05 means a chunk needs to be split if its
       size exceeds the requested memory by 5% of the total virtual device/GPU
       memory size.
       
      double internal_fragmentation_fraction = 10;
      Specified by:
      getInternalFragmentationFraction in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The internalFragmentationFraction.
    • setInternalFragmentationFraction

      public GPUOptions.Experimental.Builder setInternalFragmentationFraction(double value)
       BFC Allocator can return an allocated chunk of memory up to 2x the
       requested size. For virtual devices with tight memory constraints and
       proportionately large allocation requests, this can lead to a significant
       reduction in available memory. This threshold controls when a chunk
       should be split if its size exceeds the requested memory size. It is
       expressed as a fraction of the total memory available to the TF device. For
       example, setting it to 0.05 means a chunk needs to be split if its
       size exceeds the requested memory by 5% of the total virtual device/GPU
       memory size.
       
      double internal_fragmentation_fraction = 10;
      Parameters:
      value - The internalFragmentationFraction to set.
      Returns:
      This builder for chaining.
    • clearInternalFragmentationFraction

      public GPUOptions.Experimental.Builder clearInternalFragmentationFraction()
       BFC Allocator can return an allocated chunk of memory up to 2x the
       requested size. For virtual devices with tight memory constraints and
       proportionately large allocation requests, this can lead to a significant
       reduction in available memory. This threshold controls when a chunk
       should be split if its size exceeds the requested memory size. It is
       expressed as a fraction of the total memory available to the TF device. For
       example, setting it to 0.05 means a chunk needs to be split if its
       size exceeds the requested memory by 5% of the total virtual device/GPU
       memory size.
       
      double internal_fragmentation_fraction = 10;
      Returns:
      This builder for chaining.
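       Example (a sketch using the 0.05 value from the field comment): a chunk is
       split when its size exceeds the request by more than 5% of the virtual
       device's total memory.

         GPUOptions.Experimental exp =
             GPUOptions.Experimental.newBuilder()
                 .setInternalFragmentationFraction(0.05)
                 .build();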
    • getUseCudaMallocAsync

      public boolean getUseCudaMallocAsync()
       When true, use the CUDA cudaMallocAsync API instead of the TF GPU allocator.
       
      bool use_cuda_malloc_async = 11;
      Specified by:
      getUseCudaMallocAsync in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The useCudaMallocAsync.
    • setUseCudaMallocAsync

      public GPUOptions.Experimental.Builder setUseCudaMallocAsync(boolean value)
       When true, use the CUDA cudaMallocAsync API instead of the TF GPU allocator.
       
      bool use_cuda_malloc_async = 11;
      Parameters:
      value - The useCudaMallocAsync to set.
      Returns:
      This builder for chaining.
    • clearUseCudaMallocAsync

      public GPUOptions.Experimental.Builder clearUseCudaMallocAsync()
       When true, use the CUDA cudaMallocAsync API instead of the TF GPU allocator.
       
      bool use_cuda_malloc_async = 11;
      Returns:
      This builder for chaining.
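       Example (a minimal sketch):

         GPUOptions.Experimental exp =
             GPUOptions.Experimental.newBuilder()
                 .setUseCudaMallocAsync(true) // cudaMallocAsync instead of the TF allocator
                 .build();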
    • getDisallowRetryOnAllocationFailure

      public boolean getDisallowRetryOnAllocationFailure()
       By default, BFCAllocator may sleep when it runs out of memory, in the
       hopes that another thread will free up memory in the meantime.  Setting
       this to true disables the sleep; instead we'll OOM immediately.
       
      bool disallow_retry_on_allocation_failure = 12;
      Specified by:
      getDisallowRetryOnAllocationFailure in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The disallowRetryOnAllocationFailure.
    • setDisallowRetryOnAllocationFailure

      public GPUOptions.Experimental.Builder setDisallowRetryOnAllocationFailure(boolean value)
       By default, BFCAllocator may sleep when it runs out of memory, in the
       hopes that another thread will free up memory in the meantime.  Setting
       this to true disables the sleep; instead we'll OOM immediately.
       
      bool disallow_retry_on_allocation_failure = 12;
      Parameters:
      value - The disallowRetryOnAllocationFailure to set.
      Returns:
      This builder for chaining.
    • clearDisallowRetryOnAllocationFailure

      public GPUOptions.Experimental.Builder clearDisallowRetryOnAllocationFailure()
       By default, BFCAllocator may sleep when it runs out of memory, in the
       hopes that another thread will free up memory in the meantime.  Setting
       this to true disables the sleep; instead we'll OOM immediately.
       
      bool disallow_retry_on_allocation_failure = 12;
      Returns:
      This builder for chaining.
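       Example (a minimal sketch):

         GPUOptions.Experimental exp =
             GPUOptions.Experimental.newBuilder()
                 .setDisallowRetryOnAllocationFailure(true) // OOM immediately, no sleep/retry
                 .build();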
    • getGpuHostMemLimitInMb

      public float getGpuHostMemLimitInMb()
       Memory limit for "GPU host allocator", aka pinned memory allocator.  This
       can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
       
      float gpu_host_mem_limit_in_mb = 13;
      Specified by:
      getGpuHostMemLimitInMb in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The gpuHostMemLimitInMb.
    • setGpuHostMemLimitInMb

      public GPUOptions.Experimental.Builder setGpuHostMemLimitInMb(float value)
       Memory limit for "GPU host allocator", aka pinned memory allocator.  This
       can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
       
      float gpu_host_mem_limit_in_mb = 13;
      Parameters:
      value - The gpuHostMemLimitInMb to set.
      Returns:
      This builder for chaining.
    • clearGpuHostMemLimitInMb

      public GPUOptions.Experimental.Builder clearGpuHostMemLimitInMb()
       Memory limit for "GPU host allocator", aka pinned memory allocator.  This
       can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
       
      float gpu_host_mem_limit_in_mb = 13;
      Returns:
      This builder for chaining.
    • getGpuHostMemDisallowGrowth

      public boolean getGpuHostMemDisallowGrowth()
       If true, then the host allocator allocates its max memory all upfront and
       never grows.  This can be useful for latency-sensitive systems, because
       growing the GPU host memory pool can be expensive.
      
       You probably only want to use this in combination with
       gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
       quite high.
       
      bool gpu_host_mem_disallow_growth = 14;
      Specified by:
      getGpuHostMemDisallowGrowth in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The gpuHostMemDisallowGrowth.
    • setGpuHostMemDisallowGrowth

      public GPUOptions.Experimental.Builder setGpuHostMemDisallowGrowth(boolean value)
       If true, then the host allocator allocates its max memory all upfront and
       never grows.  This can be useful for latency-sensitive systems, because
       growing the GPU host memory pool can be expensive.
      
       You probably only want to use this in combination with
       gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
       quite high.
       
      bool gpu_host_mem_disallow_growth = 14;
      Parameters:
      value - The gpuHostMemDisallowGrowth to set.
      Returns:
      This builder for chaining.
    • clearGpuHostMemDisallowGrowth

      public GPUOptions.Experimental.Builder clearGpuHostMemDisallowGrowth()
       If true, then the host allocator allocates its max memory all upfront and
       never grows.  This can be useful for latency-sensitive systems, because
       growing the GPU host memory pool can be expensive.
      
       You probably only want to use this in combination with
       gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
       quite high.
       
      bool gpu_host_mem_disallow_growth = 14;
      Returns:
      This builder for chaining.
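       Example (a sketch combining the two host-memory fields, as the comment above
       suggests; the 4096 MB cap is an illustrative assumption):

         GPUOptions.Experimental exp =
             GPUOptions.Experimental.newBuilder()
                 .setGpuHostMemLimitInMb(4096.0f)    // cap pinned host memory at 4 GiB
                 .setGpuHostMemDisallowGrowth(true)  // allocate the whole pool upfront
                 .build();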
    • getGpuSystemMemorySizeInMb

      public int getGpuSystemMemorySizeInMb()
       Memory limit for the GPU system. This can also be set by
       TF_DEVICE_MIN_SYS_MEMORY_IN_MB, which takes precedence over
       gpu_system_memory_size_in_mb. With this, users can configure the GPU
       system memory size for better resource estimation in multi-tenancy
       (one GPU with multiple models) use cases.
       
      int32 gpu_system_memory_size_in_mb = 16;
      Specified by:
      getGpuSystemMemorySizeInMb in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The gpuSystemMemorySizeInMb.
    • setGpuSystemMemorySizeInMb

      public GPUOptions.Experimental.Builder setGpuSystemMemorySizeInMb(int value)
       Memory limit for the GPU system. This can also be set by
       TF_DEVICE_MIN_SYS_MEMORY_IN_MB, which takes precedence over
       gpu_system_memory_size_in_mb. With this, users can configure the GPU
       system memory size for better resource estimation in multi-tenancy
       (one GPU with multiple models) use cases.
       
      int32 gpu_system_memory_size_in_mb = 16;
      Parameters:
      value - The gpuSystemMemorySizeInMb to set.
      Returns:
      This builder for chaining.
    • clearGpuSystemMemorySizeInMb

      public GPUOptions.Experimental.Builder clearGpuSystemMemorySizeInMb()
       Memory limit for the GPU system. This can also be set by
       TF_DEVICE_MIN_SYS_MEMORY_IN_MB, which takes precedence over
       gpu_system_memory_size_in_mb. With this, users can configure the GPU
       system memory size for better resource estimation in multi-tenancy
       (one GPU with multiple models) use cases.
       
      int32 gpu_system_memory_size_in_mb = 16;
      Returns:
      This builder for chaining.
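       Example (a sketch; the 512 MB reservation is an illustrative assumption,
       and TF_DEVICE_MIN_SYS_MEMORY_IN_MB still takes precedence if set):

         GPUOptions.Experimental exp =
             GPUOptions.Experimental.newBuilder()
                 .setGpuSystemMemorySizeInMb(512)
                 .build();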
    • getPopulatePjrtGpuClientCreationInfo

      public boolean getPopulatePjrtGpuClientCreationInfo()
       If true, save the information needed to create a PjRt GPU client
       for creating a client with remote devices.
       
      bool populate_pjrt_gpu_client_creation_info = 17;
      Specified by:
      getPopulatePjrtGpuClientCreationInfo in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The populatePjrtGpuClientCreationInfo.
    • setPopulatePjrtGpuClientCreationInfo

      public GPUOptions.Experimental.Builder setPopulatePjrtGpuClientCreationInfo(boolean value)
       If true, save the information needed to create a PjRt GPU client
       for creating a client with remote devices.
       
      bool populate_pjrt_gpu_client_creation_info = 17;
      Parameters:
      value - The populatePjrtGpuClientCreationInfo to set.
      Returns:
      This builder for chaining.
    • clearPopulatePjrtGpuClientCreationInfo

      public GPUOptions.Experimental.Builder clearPopulatePjrtGpuClientCreationInfo()
       If true, save the information needed to create a PjRt GPU client
       for creating a client with remote devices.
       
      bool populate_pjrt_gpu_client_creation_info = 17;
      Returns:
      This builder for chaining.
    • getNodeId

      public int getNodeId()
       node_id for use when creating a PjRt GPU client with remote devices,
       which enumerates jobs*tasks from a ServerDef.
       
      int32 node_id = 18;
      Specified by:
      getNodeId in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The nodeId.
    • setNodeId

      public GPUOptions.Experimental.Builder setNodeId(int value)
       node_id for use when creating a PjRt GPU client with remote devices,
       which enumerates jobs*tasks from a ServerDef.
       
      int32 node_id = 18;
      Parameters:
      value - The nodeId to set.
      Returns:
      This builder for chaining.
    • clearNodeId

      public GPUOptions.Experimental.Builder clearNodeId()
       node_id for use when creating a PjRt GPU client with remote devices,
       which enumerates jobs*tasks from a ServerDef.
       
      int32 node_id = 18;
      Returns:
      This builder for chaining.
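       Example (a sketch for the two PjRt-related fields; node 0 is an assumed
       task index within the ServerDef enumeration):

         GPUOptions.Experimental exp =
             GPUOptions.Experimental.newBuilder()
                 .setPopulatePjrtGpuClientCreationInfo(true)
                 .setNodeId(0)
                 .build();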
    • hasStreamMergeOptions

      public boolean hasStreamMergeOptions()
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
      Specified by:
      hasStreamMergeOptions in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      Whether the streamMergeOptions field is set.
    • getStreamMergeOptions

      public GPUOptions.Experimental.StreamMergeOptions getStreamMergeOptions()
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
      Specified by:
      getStreamMergeOptions in interface GPUOptions.ExperimentalOrBuilder
      Returns:
      The streamMergeOptions.
    • setStreamMergeOptions

      public GPUOptions.Experimental.Builder setStreamMergeOptions(GPUOptions.Experimental.StreamMergeOptions value)
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
    • setStreamMergeOptions

      public GPUOptions.Experimental.Builder setStreamMergeOptions(GPUOptions.Experimental.StreamMergeOptions.Builder builderForValue)
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
    • mergeStreamMergeOptions

      public GPUOptions.Experimental.Builder mergeStreamMergeOptions(GPUOptions.Experimental.StreamMergeOptions value)
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
    • clearStreamMergeOptions

      public GPUOptions.Experimental.Builder clearStreamMergeOptions()
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
    • getStreamMergeOptionsBuilder

      public GPUOptions.Experimental.StreamMergeOptions.Builder getStreamMergeOptionsBuilder()
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
    • getStreamMergeOptionsOrBuilder

      public GPUOptions.Experimental.StreamMergeOptionsOrBuilder getStreamMergeOptionsOrBuilder()
      .tensorflow.GPUOptions.Experimental.StreamMergeOptions stream_merge_options = 19;
      Specified by:
      getStreamMergeOptionsOrBuilder in interface GPUOptions.ExperimentalOrBuilder
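       Example (a sketch of two ways to populate the message field; it only sets
       the default StreamMergeOptions instance, since the message's subfields are
       not documented in this section):

         GPUOptions.Experimental.Builder b = GPUOptions.Experimental.newBuilder();
         b.setStreamMergeOptions(
             GPUOptions.Experimental.StreamMergeOptions.getDefaultInstance());
         // Alternatively, edit in place; obtaining the nested builder also marks
         // the field as set:
         GPUOptions.Experimental.StreamMergeOptions.Builder smb =
             b.getStreamMergeOptionsBuilder();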
    • setUnknownFields

      public final GPUOptions.Experimental.Builder setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
      Specified by:
      setUnknownFields in interface com.google.protobuf.Message.Builder
      Overrides:
      setUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>
    • mergeUnknownFields

      public final GPUOptions.Experimental.Builder mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
      Specified by:
      mergeUnknownFields in interface com.google.protobuf.Message.Builder
      Overrides:
      mergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Experimental.Builder>